// xenium
// allocation_tracker.hpp
1//
2// Copyright (c) 2018-2020 Manuel Pöter.
3// Licensed under the MIT License. See LICENSE file in the project root for full license information.
4//
5
6#ifndef XENIUM_DETAIL_ALLOCATION_TRACKER_HPP
7#define XENIUM_DETAIL_ALLOCATION_TRACKER_HPP
8
9#ifndef TRACK_ALLOCATIONS
namespace xenium { namespace reclamation { namespace detail {
  // No-op stand-in used when TRACK_ALLOCATIONS is not defined: inheriting
  // from it adds no members, no vtable, and no runtime cost.
  template <typename>
  struct tracked_object {};
}}}

// With tracking disabled, all tracking hooks expand to nothing.
#define ALLOCATION_COUNTER(tracker)
#define ALLOCATION_TRACKER
#define ALLOCATION_TRACKING_FUNCTIONS
18
19#else
20
21#include <atomic>
22#include <cassert>
23#include <cstdint>
24#include <utility>
25
26namespace xenium { namespace reclamation { namespace detail {
27 struct allocation_tracker;
28
  // Mix-in that counts object lifetimes through the Tracker's static hooks:
  // every construction (default, copy, or move) counts as an allocation and
  // every destruction as a reclamation, so allocated - reclaimed is the
  // number of currently live instances.
  template <typename Tracker>
  struct tracked_object {
    tracked_object() noexcept { Tracker::count_allocation(); }
    // Copy/move construction still creates a new instance, so it counts too.
    tracked_object(const tracked_object&) noexcept { Tracker::count_allocation(); }
    tracked_object(tracked_object&&) noexcept { Tracker::count_allocation(); }
    // Virtual so destruction through a base pointer still runs this counter.
    virtual ~tracked_object() noexcept { Tracker::count_reclamation(); }
  };
36
37 struct allocation_counter
38 {
39 ~allocation_counter() { vals->dead = true; }
40
41 struct values
42 {
43 values() :
44 allocated_instances(),
45 reclaimed_instances(),
46 dead(false),
47 next()
48 {}
49 std::atomic<std::size_t> allocated_instances;
50 std::atomic<std::size_t> reclaimed_instances;
51 std::atomic<bool> dead;
52 values* next;
53 };
54 void count_allocation()
55 {
56 assert(vals->dead == false);
57 auto v = vals->allocated_instances.load(std::memory_order_relaxed);
58 vals->allocated_instances.store(v + 1, std::memory_order_relaxed);
59 }
60 void count_reclamation()
61 {
62 assert(vals->dead == false);
63 auto v = vals->reclaimed_instances.load(std::memory_order_relaxed);
64 vals->reclaimed_instances.store(v + 1, std::memory_order_relaxed);
65 }
66 protected:
67 values* vals = new values();;
68 };
69
  // An allocation_counter that, on construction, links its `values` node
  // into Tracker::allocation_tracker's global intrusive list so the tracker
  // can aggregate it. Registration uses the standard lock-free list push:
  // point next at the observed head, then CAS the head; on CAS failure `h`
  // is refreshed with the current head and the loop retries.
  template <typename Tracker>
  struct registered_allocation_counter : allocation_counter
  {
    registered_allocation_counter()
    {
      auto h = Tracker::allocation_tracker.head.load(std::memory_order_relaxed);
      do
      {
        // `vals` is not yet visible to other threads, so a plain store is
        // fine; the release CAS below publishes the initialized node.
        vals->next = h;
      } while (!Tracker::allocation_tracker.head.compare_exchange_weak(h, vals, std::memory_order_release));
    }
  };
82 struct allocation_tracker
83 {
84 std::pair<std::size_t, std::size_t> get_counters() const
85 {
86 std::size_t allocated_instances = collapsed_allocated_instances;
87 std::size_t reclaimed_instances = collapsed_reclaimed_instances;
88 auto p = head.load(std::memory_order_acquire);
89 while (p)
90 {
91 allocated_instances += p->allocated_instances.load(std::memory_order_relaxed);
92 reclaimed_instances += p->reclaimed_instances.load(std::memory_order_relaxed);
93 p = p->next;
94 }
95 return std::make_pair(allocated_instances, reclaimed_instances);
96 }
97
98 void collapse_counters()
99 {
100 auto p = head.load(std::memory_order_acquire);
101 allocation_counter::values* remaining = nullptr;
102 while (p)
103 {
104 auto next = p->next;
105 if (p->dead.load(std::memory_order_relaxed))
106 {
107 collapsed_allocated_instances += p->allocated_instances.load(std::memory_order_relaxed);
108 collapsed_reclaimed_instances += p->reclaimed_instances.load(std::memory_order_relaxed);
109 delete p;
110 }
111 else
112 {
113 p->next = remaining;
114 remaining = p;
115 }
116 p = next;
117 }
118 head.store(remaining, std::memory_order_relaxed);
119 }
120 private:
121 template <typename>
122 friend struct registered_allocation_counter;
123 std::atomic<allocation_counter::values*> head;
124 std::size_t collapsed_allocated_instances = 0;
125 std::size_t collapsed_reclaimed_instances = 0;
126 };
127}}}
128
// Embeds a registered counter in a class; `tracker` names the class whose
// static allocation_tracker the counter registers with.
#define ALLOCATION_COUNTER(tracker) \
  detail::registered_allocation_counter<tracker> allocation_counter;

// Declares the per-tracker global counter list (C++17 inline static).
#define ALLOCATION_TRACKER \
  inline static detail::allocation_tracker allocation_tracker;

// Grants tracked_object access to the tracker's counting hooks and declares
// them; their definitions are supplied by the tracker class.
#define ALLOCATION_TRACKING_FUNCTIONS \
  template <typename> friend struct detail::tracked_object; \
  static void count_allocation(); \
  static void count_reclamation();
139
140#endif
141
142#endif