foonathan-memory / 467c2e3
New upstream version 0.7 Timo Röhling 3 years ago
125 changed file(s) with 24896 addition(s) and 0 deletion(s).
0 AccessModifierOffset: -4
1 AlignAfterOpenBracket: Align
2 AlignConsecutiveAssignments: true
3 AlignConsecutiveDeclarations: true
4 AlignEscapedNewlinesLeft: false
5 AlignOperands: true
6 AlignTrailingComments: true
7 AllowAllParametersOfDeclarationOnNextLine: true
8 AllowShortBlocksOnASingleLine: false
9 AllowShortCaseLabelsOnASingleLine: false
10 AllowShortFunctionsOnASingleLine: Empty
11 AllowShortIfStatementsOnASingleLine: false
12 AllowShortLoopsOnASingleLine: false
13 AlwaysBreakAfterReturnType: None
14 AlwaysBreakBeforeMultilineStrings: false
15 AlwaysBreakTemplateDeclarations: true
16 BinPackArguments: true
17 BinPackParameters: true
18 BreakBeforeBinaryOperators: NonAssignment
19 BreakBeforeBraces: Allman
20 BreakBeforeTernaryOperators: false
21 BreakConstructorInitializersBeforeComma: false
22 ConstructorInitializerAllOnOneLineOrOnePerLine: true
23 ConstructorInitializerIndentWidth: 0
24 ContinuationIndentWidth: 4
25 ColumnLimit: 100
26 Cpp11BracedListStyle: true
27 IndentCaseLabels: false
28 IndentWidth: 4
29 IndentWrappedFunctionNames: true
30 Language: Cpp
31 KeepEmptyLinesAtTheStartOfBlocks: false
32 MaxEmptyLinesToKeep: 1
33 NamespaceIndentation: All
34 PenaltyBreakBeforeFirstCallParameter: 19937
35 PenaltyReturnTypeOnItsOwnLine: 19937
36 PointerAlignment: Left
37 ReflowComments: false
38 SortIncludes: false
39 SpaceAfterCStyleCast: false
40 SpaceBeforeAssignmentOperators: true
41 SpaceBeforeParens: ControlStatements
42 SpaceInEmptyParentheses: false
43 SpacesBeforeTrailingComments: 1
44 SpacesInAngles: false
45 SpacesInCStyleCastParentheses: false
46 SpacesInParentheses: false
47 SpacesInSquareBrackets: false
48 Standard: Cpp11
49 TabWidth: 4
50 UseTab: Never
0 patreon: foonathan
1 custom: ['https://jonathanmueller.dev/support-me/']
2
0 name: Code Coverage
1
2 on: [push, pull_request]
3
4 jobs:
5 code_coverage:
6 runs-on: ubuntu-latest
7
8 steps:
9 - uses: actions/checkout@v2
10 - name: Create Build Environment
11 run: cmake -E make_directory build
12
13 - name: Configure
14 working-directory: build/
15 run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_FLAGS="--coverage" -DCMAKE_EXE_LINKER_FLAGS="--coverage"
16 - name: Build
17 working-directory: build/
18 run: cmake --build . --config Debug
19 - name: Test
20 working-directory: build/
21 run: ctest -C Debug --output-on-failure
22
23 - name: Collect code coverage
24 working-directory: build/
25 run: bash <(curl -s https://codecov.io/bash)
26
0 name: Feature CI
1
2 on:
3 push:
4 branches-ignore: ['master']
5 pull_request:
6
7 jobs:
8 linux:
9 strategy:
10 fail-fast: false
11 matrix:
12 image:
13 # List: https://github.com/conan-io/conan-docker-tools
14 - gcc10
15 - gcc5
16 - clang10
17 - clang40
18
19 runs-on: ubuntu-latest
20 container:
21 image: conanio/${{matrix.image}}
22 options: --user root
23
24 steps:
25 - uses: actions/checkout@v2
26 - name: Create Build Environment
27 run: cmake -E make_directory build
28
29 - name: Configure
30 working-directory: build/
31 run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Debug
32 - name: Build
33 working-directory: build/
34 run: cmake --build . --config Debug
35 - name: Test
36 working-directory: build/
37 run: ctest -C Debug --output-on-failure
38
39 macos:
40 strategy:
41 fail-fast: false
42 matrix:
43 xcode: ['10', '12']
44
45 runs-on: macos-latest
46
47 steps:
48 - uses: actions/checkout@v2
49 - uses: maxim-lobanov/setup-xcode@v1
50 with:
51 xcode-version: ${{matrix.xcode}}
52 - name: Create Build Environment
53 run: cmake -E make_directory build
54
55 - name: Configure
56 working-directory: build/
57 run: cmake $GITHUB_WORKSPACE
58 - name: Build
59 working-directory: build/
60 run: cmake --build .
61 - name: Test
62 working-directory: build/
63 run: ctest --output-on-failure
64
65 windows:
66 runs-on: windows-latest
67
68 steps:
69 - uses: actions/checkout@v2
70 - name: Create Build Environment
71 run: cmake -E make_directory build
72
73 - name: Configure and build
74 uses: lukka/run-cmake@main
75 with:
76 cmakeGenerator: 'Ninja'
77 cmakeListsOrSettingsJson: 'CMakeListsTxtBasic'
78 cmakeListsTxtPath: '${{ github.workspace }}/CMakeLists.txt'
79 useVcpkgToolchainFile: true
80 buildDirectory: 'build/'
81 - name: Test
82 working-directory: build/
83 run: ctest -C Debug --output-on-failure
84
0 name: Main CI
1
2 on:
3 push:
4 branches: [master]
5
6 jobs:
7 linux:
8 strategy:
9 fail-fast: false
10 matrix:
11 image:
12 # List: https://github.com/conan-io/conan-docker-tools
13 - gcc10
14 - gcc9
15 - gcc8
16 - gcc7
17 - gcc6
18 - gcc5
19 - clang10
20 - clang9
21 - clang8
22 - clang7
23 - clang60
24 - clang50
25 - clang40
26 build_type: [Debug, Release]
27
28 runs-on: ubuntu-latest
29 container:
30 image: conanio/${{matrix.image}}
31 options: --user root
32
33 steps:
34 - uses: actions/checkout@v2
35 - name: Create Build Environment
36 run: cmake -E make_directory build
37
38 - name: Configure
39 working-directory: build/
40 run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${{matrix.build_type}}
41 - name: Build
42 working-directory: build/
43 run: cmake --build . --config ${{matrix.build_type}}
44 - name: Test
45 working-directory: build/
46 run: ctest -C ${{matrix.build_type}} --output-on-failure
47
48 macos:
49 strategy:
50 fail-fast: false
51 matrix:
52 xcode:
53 - '10'
54 - '11'
55 - '12'
56
57 runs-on: macos-latest
58
59 steps:
60 - uses: actions/checkout@v2
61 - uses: maxim-lobanov/setup-xcode@v1
62 with:
63 xcode-version: ${{matrix.xcode}}
64 - name: Create Build Environment
65 run: cmake -E make_directory build
66
67 - name: Configure
68 working-directory: build/
69 run: cmake $GITHUB_WORKSPACE
70 - name: Build
71 working-directory: build/
72 run: cmake --build .
73 - name: Test
74 working-directory: build/
75 run: ctest --output-on-failure
76
77 windows:
78 strategy:
79 fail-fast: false
80 matrix:
81 build_type: [Debug, Release]
82
83 runs-on: windows-latest
84
85 steps:
86 - uses: actions/checkout@v2
87 - name: Create Build Environment
88 run: cmake -E make_directory build
89
90 - name: Configure and build
91 uses: lukka/run-cmake@main
92 with:
93 cmakeGenerator: 'Ninja'
94 cmakeListsOrSettingsJson: 'CMakeListsTxtBasic'
95 cmakeListsTxtPath: '${{ github.workspace }}/CMakeLists.txt'
96 useVcpkgToolchainFile: true
97 buildDirectory: 'build/'
98 - name: Test
99 working-directory: build/
100 run: ctest -C Debug --output-on-failure
101
0 /doc/html
1 /doc/*.tmp
2 # Created by .ignore support plugin (hsz.mobi)
3
4 build
5 .vscode
0 # Upcoming Changes
1
2 # 0.7
3
4 BREAKING: Removed the use of the compatibility library to automatically generate macros and workarounds for older compilers.
5 The important compatibility workarounds like the `__builtin_clz` extension are still used, but workarounds for missing C++11 library features have been removed.
6 In particular, the library now requires compiler support for `noexcept`, `constexpr`, `alignof` and `thread_local`.
7 This means that GCC 4.8 and Visual Studio version 12.0 (both released in 2013) are no longer supported.
8
9 ## Adapter
10
11 BREAKING: Remove `Mutex` support from `allocator_reference` and consequently from `std_allocator`, `allocator_deleter`, ...
12 Embedding the `Mutex` with the reference was *fundamentally* broken and could not actually ensure thread safety.
13 Use a reference to a `thread_safe_allocator` instead, which actually guarantees thread safety.
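For illustration, a minimal sketch of the recommended replacement; it assumes `thread_safe_allocator` is declared in `threading.hpp` and is constructible from a moved pool:

```cpp
#include <foonathan/memory/memory_pool.hpp>
#include <foonathan/memory/threading.hpp> // assumed location of thread_safe_allocator
#include <foonathan/memory/namespace_alias.hpp>

// the mutex lives inside the allocator itself, not inside each reference,
// so everybody referencing `pool` shares one lock
memory::thread_safe_allocator<memory::memory_pool<>> pool(memory::memory_pool<>(16, 4096));
```

References to it, e.g. through `std_allocator`, then synchronize on that single mutex.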
14
15 ## Allocator
16
17 Add the ability to query the minimal block size required by a `memory_pool` or `memory_stack` to contain a given amount of memory.
18 Due to internal data structures and debug fences, this is more than the naive memory request, so it previously could not be computed by the user.
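For illustration, a sketch of how such a query could look; the name `min_block_size` and its signature are assumptions based on the description above, not a verified interface:

```cpp
#include <foonathan/memory/memory_pool.hpp>
#include <foonathan/memory/namespace_alias.hpp>

// assumed helper: block size needed for 1000 nodes of 16 bytes each,
// including internal bookkeeping and debug fences
auto block_size = memory::memory_pool<>::min_block_size(16, 1000);

// a pool whose initial block is guaranteed to hold all 1000 nodes
memory::memory_pool<> pool(16, block_size);
```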
19
20 ## Bugfixes
21
22 * more CMake improvements for cross-compiling, among others
23 * bugfixes to support UWP (#80), VxWorks (#81) and QNX (#85, #88, among others)
24 * better support for missing container node sizes (#59, #72, among others)
25 * fix alignment issues in debug mode
26 * fix tracking for allocators without block allocators
27
28 ---
29
30 # 0.6-2
31
32 Various bug fixes, compiler warning workarounds and CMake improvements accumulated over the past two years.
33 Most notable changes:
34
35 * cross compilation works now
36 * `fallback_allocator` is default constructible if stateless
37 * add `unique_base_ptr` to support a unique ptr to a base class
38 * add `allocate_unique` overloads that take a custom mutex
39 * allocator deleters are default constructible
40
41 ---
42
43 # 0.6-1
44
45 * fix CMake configuration error
46 * fix double free error in `segregator`
47 * add `static_assert()` when default constructing a stateful `std_allocator`
48 * fix various compiler warnings
49
50 # 0.6
51
52 ## Tool
53
54 * better MSVC support
55 * improved compilation time
56
57 ## Core
58
59 * add literal operators for memory sizes (`4_KiB`; see the sketch after this list)
60 * more flexible `make_block_allocator`
61 * composable allocator concept
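For illustration, a minimal sketch of the size literals, relying only on `memory::literals` as also used in the README example:

```cpp
#include <foonathan/memory/memory_pool.hpp>
#include <foonathan/memory/namespace_alias.hpp>

using namespace memory::literals;

// binary literal: 4_KiB == 4096 bytes
memory::memory_pool<> pool(16, 4_KiB);
```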
62
63 ## Allocator
64
65 * improved `temporary_allocator`: explicit separation into `temporary_stack`, improved automatic creation
66 * new `memory_stack_raii_unwind` for automatic unwinding
67 * new `iteration_allocator`
68 * make allocators composable
69 * add facilities for joint memory allocations
70
71 ## Adapter
72
73 * add `shared_ptr_node_size`
74 * add `string` container typedef
75 * add `fallback_allocator`
76 * add `segregator`
77
78 ## Bugfixes
79
80 * OSX support
81 * various warnings fixed
82
83 ---
84
85 # 0.5
86 * improved CMake build system, now supports CMake installation and `find_package()`
87 * improved low-level allocators and added `malloc_allocator`
88 * add virtual memory interface and allocators
89 * add allocators using a fixed-sized storage block
90 * introduced `BlockAllocator` concept and various implementations (see the interface sketch after this list)
91 * new class template `memory_arena` that is used inside the higher level allocators, allows more control over the internal allocations
92 * add wrappers/adapters for the polymorphic memory resource TS
93 * improved tracking classes
94 * other improvements like concept checks and more exception classes
95 * internal changes
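For illustration, a sketch of the interface a `BlockAllocator` has to provide; the member names follow the concept documentation, and it assumes `memory_block` is defined in `memory_arena.hpp`:

```cpp
#include <cstddef>
#include <foonathan/memory/memory_arena.hpp> // memory_block (assumed header)

struct my_block_allocator
{
    // allocates a new block of memory for the arena
    foonathan::memory::memory_block allocate_block();

    // deallocates a block previously returned by allocate_block()
    void deallocate_block(foonathan::memory::memory_block block) noexcept;

    // the size of the next block that allocate_block() will return
    std::size_t next_block_size() const noexcept;
};
```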
96
97 ---
98
99 # 0.4
100
101 * polished up the interface; many breaking changes in the form of renaming and new header files
102 * added unified error handling facilities and handler functions in case exceptions are not supported
103 * improved old allocator adapters by introducing allocator_storage template
104 * improved allocator_traits, making them more powerful and able to handle Allocator types directly
105 * added type-erased allocator storage
106 * added node-size debugger that obtains information about the container node sizes
107 * most parts now work on a freestanding implementation
108 * used foonathan/compatibility for CMake compatibility checks
109 * added miscellaneous tiny features all over the place
110 * many internal changes and bugfixes
111
112 ---
113
114 # 0.3
115
116 * added debugging options such as memory filling, deallocation checks and leak checks
117 * improved performance of pool allocators
118 * changed complete project structure and CMake
119 * many internal changes and bugfixes and automated testing
120
121 ---
122
123 # 0.2
124
125 * added temporary_allocator as portable alloca
126 * added small_node_pool type optimized for low-overhead small object allocations
127 * added various allocator adapters including a thread_safe_allocator for locking
128 * better compiler support
129 * many internal changes and bugfixes
130
131 ---
132
133 # 0.1-1
134
135 * critical bugfix in memory_stack
136 * added smart pointer example
137
138 ---
139
140 # 0.1
141
142 * first beta version
0 # Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 # This file is subject to the license terms in the LICENSE file
2 # found in the top-level directory of this distribution.
3
4 # root CMakeLists.txt, specifies option and interface library
5
6 cmake_minimum_required(VERSION 3.1)
7 project(FOONATHAN_MEMORY)
8
9 set(FOONATHAN_MEMORY_VERSION_MAJOR 0 CACHE STRING "major version of memory" FORCE)
10 set(FOONATHAN_MEMORY_VERSION_MINOR 7 CACHE STRING "minor version of memory" FORCE)
11 set(FOONATHAN_MEMORY_VERSION_PATCH 0 CACHE STRING "patch version of memory" FORCE)
12 set(FOONATHAN_MEMORY_VERSION "${FOONATHAN_MEMORY_VERSION_MAJOR}.${FOONATHAN_MEMORY_VERSION_MINOR}.${FOONATHAN_MEMORY_VERSION_PATCH}"
13 CACHE STRING "version of memory" FORCE)
14
15
16 # set a debug postfix
17 set(CMAKE_DEBUG_POSTFIX "-dbg")
18
19 # installation destinations
20 if(UNIX OR VXWORKS)
21 include(GNUInstallDirs)
22
23 set(FOONATHAN_MEMORY_INC_INSTALL_DIR "${CMAKE_INSTALL_INCLUDEDIR}/foonathan_memory")
24 set(FOONATHAN_MEMORY_RUNTIME_INSTALL_DIR "${CMAKE_INSTALL_BINDIR}")
25 set(FOONATHAN_MEMORY_LIBRARY_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}")
26 set(FOONATHAN_MEMORY_ARCHIVE_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}")
27 set(FOONATHAN_MEMORY_FRAMEWORK_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}")
28
29 set(FOONATHAN_MEMORY_CMAKE_CONFIG_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}/foonathan_memory/cmake")
30 set(FOONATHAN_MEMORY_ADDITIONAL_FILES_INSTALL_DIR "${CMAKE_INSTALL_DATADIR}/foonathan_memory")
31 elseif(WIN32)
32 set(FOONATHAN_MEMORY_INC_INSTALL_DIR "include/foonathan_memory")
33 set(FOONATHAN_MEMORY_RUNTIME_INSTALL_DIR "bin")
34 set(FOONATHAN_MEMORY_LIBRARY_INSTALL_DIR "bin")
35 set(FOONATHAN_MEMORY_ARCHIVE_INSTALL_DIR "lib")
36 set(FOONATHAN_MEMORY_FRAMEWORK_INSTALL_DIR "bin")
37
38 set(FOONATHAN_MEMORY_CMAKE_CONFIG_INSTALL_DIR "share/foonathan_memory/cmake")
39 set(FOONATHAN_MEMORY_ADDITIONAL_FILES_INSTALL_DIR "share/foonathan_memory")
40 set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
41 else()
42 message(FATAL_ERROR "Could not set install folders for this platform!")
43 endif()
44
45 include(cmake/configuration.cmake)
46
47 # subdirectories
48 add_subdirectory(src)
49 if(FOONATHAN_MEMORY_BUILD_EXAMPLES)
50 add_subdirectory(example)
51 endif()
52 if(FOONATHAN_MEMORY_BUILD_TESTS)
53 enable_testing()
54 add_subdirectory(test)
55 endif()
56 if(FOONATHAN_MEMORY_BUILD_TOOLS)
57 add_subdirectory(tool)
58 endif()
59
60 # install readme and license
61 install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE" "${CMAKE_CURRENT_SOURCE_DIR}/README.md" DESTINATION ${FOONATHAN_MEMORY_ADDITIONAL_FILES_INSTALL_DIR})
62
63 install(EXPORT foonathan_memoryTargets DESTINATION ${FOONATHAN_MEMORY_CMAKE_CONFIG_INSTALL_DIR}
64 FILE foonathan_memory-config.cmake)
65
66
0 Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1
2 This software is provided 'as-is', without any express or
3 implied warranty. In no event will the authors be held
4 liable for any damages arising from the use of this software.
5
6 Permission is granted to anyone to use this software for any purpose,
7 including commercial applications, and to alter it and redistribute
8 it freely, subject to the following restrictions:
9
10 1. The origin of this software must not be misrepresented;
11 you must not claim that you wrote the original software.
12 If you use this software in a product, an acknowledgment
13 in the product documentation would be appreciated but
14 is not required.
15
16 2. Altered source versions must be plainly marked as such,
17 and must not be misrepresented as being the original software.
18
19 3. This notice may not be removed or altered from any
20 source distribution.
0 # memory
1
2 ![Project Status](https://img.shields.io/endpoint?url=https%3A%2F%2Fwww.jonathanmueller.dev%2Fproject%2Fmemory%2Findex.json)
3 ![Build Status](https://github.com/foonathan/memory/workflows/Main%20CI/badge.svg)
4 [![Code Coverage](https://codecov.io/gh/foonathan/memory/branch/master/graph/badge.svg?token=U6wnInlamY)](https://codecov.io/gh/foonathan/memory)
5
6 The C++ STL allocator model has various flaws. For example, allocators are fixed to a certain type, because they are almost necessarily required to be templates, so you can't easily share a single allocator between multiple types. In addition, you can only get a copy from a container, not the original allocator object. C++11 at least allows them to be stateful, so they can be made object-based instead of instance-based. But still, the model has many flaws.
7 Over the years, many solutions have been proposed, for example [EASTL]. This library is another one. But instead of trying to change the STL, it works with the current implementation.
8
9 If you like this project, consider [supporting me](https://jonathanmueller.dev/support-me/).
10 It would really help!
11
12 ## Features
13
14 New allocator concepts:
15
16 * a `RawAllocator` that is similar to an `Allocator` but easier to use and write
17 * a `BlockAllocator` that is an allocator for huge memory blocks
18
19 Several implementations:
20
21 * `heap_/malloc_/new_allocator`
22 * virtual memory allocators
23 * allocator using a static memory block located on the stack
24 * memory stack, `iteration_allocator`
25 * different memory pools
26 * a portable, improved `alloca()` in the form of `temporary_allocator`
27 * facilities for joint memory allocations: share a big memory block for the object
28 and all dynamic memory allocations for its members
29
30 Adapters, wrappers and storage classes:
31
32 * incredibly powerful `allocator_traits` allowing `Allocator`s as `RawAllocator`s
33 * `std_allocator` to make a `RawAllocator` an `Allocator` again
34 * adapters for the memory resource TS
35 * `allocator_deleter` classes for smart pointers
36 * (optionally type-erased) `allocator_reference` and other storage classes
37 * memory tracking wrapper
38
39 In addition:
40
41 * container node size debuggers that obtain information about the node size of an STL container at compile-time to specify node sizes for pools
42 * debugging options for leak checking, double-free checks or buffer overflows
43 * customizable error handling routines that can work with exceptions disabled
44 * everything except the STL adapters works on a freestanding environment
45
46 ## Basic example
47
48 ```cpp
49 #include <algorithm>
50 #include <iostream>
51 #include <iterator>
52
53 #include <foonathan/memory/container.hpp> // vector, list, list_node_size
54 #include <foonathan/memory/memory_pool.hpp> // memory_pool
55 #include <foonathan/memory/smart_ptr.hpp> // allocate_unique
56 #include <foonathan/memory/static_allocator.hpp> // static_allocator_storage, static_block_allocator
57 #include <foonathan/memory/temporary_allocator.hpp> // temporary_allocator
58
59 // alias namespace foonathan::memory as memory for easier access
60 #include <foonathan/memory/namespace_alias.hpp>
61
62 template <typename BiIter>
63 void merge_sort(BiIter begin, BiIter end);
64
65 int main()
66 {
67 using namespace memory::literals;
68
69 // a memory pool RawAllocator
70 // allocates a memory block - initially 4KiB - and splits it into chunks of list_node_size<int>::value big
71 // list_node_size<int>::value is the size of each node of a std::list
72 memory::memory_pool<> pool(memory::list_node_size<int>::value, 4_KiB);
73
74 // just an alias for std::list<int, memory::std_allocator<int, memory::memory_pool<>>>
75 // a std::list using a memory_pool
76 // std_allocator stores a reference to a RawAllocator and provides the Allocator interface
77 memory::list<int, memory::memory_pool<>> list(pool);
78 list.push_back(3);
79 list.push_back(2);
80 list.push_back(1);
81
82 for (auto e : list)
83 std::cout << e << ' ';
84 std::cout << '\n';
85
86 merge_sort(list.begin(), list.end());
87
88 for (auto e : list)
89 std::cout << e << ' ';
90 std::cout << '\n';
91
92 // allocate a std::unique_ptr using the pool
93 // memory::allocate_shared is also available
94 auto ptr = memory::allocate_unique<int>(pool, *list.begin());
95 std::cout << *ptr << '\n';
96
97 // static storage of size 4KiB
98 memory::static_allocator_storage<4096u> storage;
99
100 // a memory pool again but this time with a BlockAllocator
101 // this controls the internal allocations of the pool itself
102 // we need to specify the first template parameter giving the type of the pool as well
103 // (node_pool is the default)
104 // we use a static_block_allocator that uses the static storage above
105 // all allocations will use a memory block on the stack
106 using static_pool_t = memory::memory_pool<memory::node_pool, memory::static_block_allocator>;
107 static_pool_t static_pool(memory::unordered_set_node_size<int>::value, 4096u, storage);
108
109 // again, just an alias for std::unordered_set<int, std::hash<int>, std::equal_to<int>, memory::std_allocator<int, static_pool_t>>
110 // see why I wrote these? :D
111 // now we have a hash set that lives on the stack!
112 memory::unordered_set<int, static_pool_t> set(static_pool);
113
114 set.insert(3);
115 set.insert(2);
116 set.insert(3); // running out of stack memory is properly handled, of course
117
118 for (auto e : set)
119 std::cout << e << ' ';
120 std::cout << '\n';
121 }
122
123 // naive implementation of merge_sort using temporary memory allocator
124 template <typename BiIter>
125 void merge_sort(BiIter begin, BiIter end)
126 {
127 using value_type = typename std::iterator_traits<BiIter>::value_type;
128
129 auto distance = std::distance(begin, end);
130 if (distance <= 1)
131 return;
132
133 auto mid = begin;
134 std::advance(mid, distance / 2);
135
136 // an allocator for temporary memory
137 // is similar to alloca() but uses its own stack
138 // this stack is thread_local and created on the first call to this function
139 // as soon as the allocator object goes out of scope, everything allocated through it will be freed
140 auto alloc = memory::make_temporary_allocator();
141
142 // alias for std::vector<value_type, memory::std_allocator<value_type, memory::temporary_allocator>>
143 // a std::vector using a temporary_allocator
144 memory::vector<value_type, memory::temporary_allocator> first(begin, mid, alloc),
145 second(mid, end, alloc);
146
147 merge_sort(first.begin(), first.end());
148 merge_sort(second.begin(), second.end());
149 std::merge(first.begin(), first.end(), second.begin(), second.end(), begin);
150 }
151 ```
152
153 See `example/` for more.
154
155 ## Installation
156
157 This library can be used as a [CMake] subdirectory.
158 It is tested on GCC 4.8-5.0, Clang 3.5 and Visual Studio 2013. Newer versions should work too.
159
160 1. Fetch it, e.g. using [git submodules] `git submodule add https://github.com/foonathan/memory ext/memory` and `git submodule update --init --recursive`.
161
162 2. Call `add_subdirectory(ext/memory)` or whatever your local path is to make it available in CMake.
163
164 3. Simply call `target_link_libraries(your_target PUBLIC foonathan_memory)` to link this library; this also sets up the include search path and compilation options.
165
166 You can also install the library:
167
168 1. Run `cmake -DCMAKE_BUILD_TYPE="buildtype" -DFOONATHAN_MEMORY_BUILD_EXAMPLES=OFF -DFOONATHAN_MEMORY_BUILD_TESTS=OFF .` inside the library sources.
169
170 2. Run `cmake --build . -- install` to install the library under `${CMAKE_INSTALL_PREFIX}`.
171
172 3. Repeat 1 and 2 for each build type/configuration you want to have (like `Debug`, `RelWithDebInfo` and `Release` or custom names).
173
174 To use an installed library:
175
176 4. Call `find_package(foonathan_memory major.minor REQUIRED)` to find the library.
177
178 5. Call `target_link_libraries(your_target PUBLIC foonathan_memory)` to link to the library and setup all required options.
179
180 See https://foonathan.net/memory/md_doc_installation.html for a detailed guide.
181
182 ## Documentation
183
184 Full documentation can be found at https://foonathan.net/memory.
185
186 A tutorial is also available at https://foonathan.net/memory/md_doc_tutorial.html.
187
188 ## RawAllocator
189
190 Below is the interface required for a `RawAllocator`; everything optional is marked:
191
192 ```cpp
193 struct raw_allocator
194 {
195 using is_stateful = std::integral_constant<bool, Value>; // optional, defaults to std::is_empty
196
197 void* allocate_node(std::size_t size, std::size_t alignment); // required, allocation function
198 void deallocate_node(void *node, std::size_t size, std::size_t alignment) noexcept; // required, deallocation function
199
200 void* allocate_array(std::size_t count, std::size_t size, std::size_t alignment); // optional, forwards to node version
201 void deallocate_array(void *ptr, std::size_t count, std::size_t size, std::size_t alignment) noexcept; // optional, forwards to node version
202
203 std::size_t max_node_size() const; // optional, returns maximum value
204 std::size_t max_array_size() const; // optional, forwards to max_node_size()
205 std::size_t max_alignment() const; // optional, returns maximum fundamental alignment, i.e. alignof(std::max_align_t)
206 };
207 ```
208
209 A `RawAllocator` only needs to be movable; all `Allocator` classes are `RawAllocator`s, too.
210 Classes not providing the interface can specialize the `allocator_traits`; read more about [writing allocators here](https://foonathan.net/memory/md_doc_writing_allocators.html) or about the technical details of the [concept here](https://foonathan.net/memory/md_doc_concepts.html).
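For illustration, a minimal conforming `RawAllocator`; the library already ships a real `malloc_allocator`, so this hypothetical `malloc_raw_allocator` exists only to show the two required members:

```cpp
#include <cstddef>
#include <cstdlib>
#include <new>

// a stateless RawAllocator on top of std::malloc (alignment handling omitted for brevity)
struct malloc_raw_allocator
{
    void* allocate_node(std::size_t size, std::size_t /*alignment*/)
    {
        auto ptr = std::malloc(size);
        if (!ptr)
            throw std::bad_alloc();
        return ptr;
    }

    void deallocate_node(void* node, std::size_t /*size*/, std::size_t /*alignment*/) noexcept
    {
        std::free(node);
    }
};
```

Everything else (the array functions, maximum sizes, `is_stateful`) is then filled in by `allocator_traits`, so it can be used like any other `RawAllocator`, e.g. as `memory::std_allocator<int, malloc_raw_allocator>`.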
211
212 ## Acknowledgements
213
214 This project is greatly supported by my [patrons](https://patreon.com/foonathan).
215 In particular thanks to the individual supporters:
216
217 * Kaido Kert
218
219 And big thanks to the contributors as well:
220
221 * @asobhy-qnx
222 * @bfierz
223 * @nicolastagliani
224 * @cho3
225 * @j-carl
226 * @myd7349
227 * @moazzamak
228 * @maksqwe
229 * @kaidokert
230 * @gabyx
232 * @Manu343726
233 * @MiguelCompany
236 * @quattrinili
237 * @razr
238 * @seanyen
239 * @wtsnyder
240 * @zhouchengming1
241
242 [EASTL]: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2271.html
243 [CMake]: https://www.cmake.org
244 [git submodules]: http://git-scm.com/docs/git-submodule
245 [foonathan/compatibility]: https://github.com/foonathan/compatibility
0 # Copyright (C) 2015-2016 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 # This file is subject to the license terms in the LICENSE file
2 # found in the top-level directory of this distribution.
3
4 # defines configuration options
5 # note: only include it in memory's top-level CMakeLists.txt, after compatibility.cmake
6
7 # what to build
8 # examples/tests if toplevel directory (i.e. direct build, not as subdirectory) and hosted
9 # tools if hosted
10 if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
11 set(build_examples_tests 1)
12 else()
13 set(build_examples_tests 0)
14 endif()
15 set(build_tools 1)
16
17 option(FOONATHAN_MEMORY_BUILD_EXAMPLES "whether or not to build the examples" ${build_examples_tests})
18 option(FOONATHAN_MEMORY_BUILD_TESTS "whether or not to build the tests" ${build_examples_tests})
19 option(FOONATHAN_MEMORY_BUILD_TOOLS "whether or not to build the tools" ${build_tools})
20
21 # debug options, pre-set by build type
22 if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
23 set(FOONATHAN_MEMORY_DEBUG_ASSERT ON CACHE BOOL "" FORCE)
24 set(FOONATHAN_MEMORY_DEBUG_FILL ON CACHE BOOL "" FORCE)
25 set(FOONATHAN_MEMORY_DEBUG_FENCE 8 CACHE STRING "" FORCE)
26 set(FOONATHAN_MEMORY_DEBUG_LEAK_CHECK ON CACHE BOOL "" FORCE)
27 set(FOONATHAN_MEMORY_DEBUG_POINTER_CHECK ON CACHE BOOL "" FORCE)
28 set(FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK ON CACHE BOOL "" FORCE)
29 elseif("${CMAKE_BUILD_TYPE}" STREQUAL "RelWithDebInfo")
30 set(FOONATHAN_MEMORY_DEBUG_ASSERT OFF CACHE BOOL "" FORCE)
31 set(FOONATHAN_MEMORY_DEBUG_FILL ON CACHE BOOL "" FORCE)
32 set(FOONATHAN_MEMORY_DEBUG_FENCE 0 CACHE STRING "" FORCE)
33 set(FOONATHAN_MEMORY_DEBUG_LEAK_CHECK ON CACHE BOOL "" FORCE)
34 set(FOONATHAN_MEMORY_DEBUG_POINTER_CHECK ON CACHE BOOL "" FORCE)
35 set(FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK OFF CACHE BOOL "" FORCE)
36 elseif("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
37 set(FOONATHAN_MEMORY_DEBUG_ASSERT OFF CACHE BOOL "" FORCE)
38 set(FOONATHAN_MEMORY_DEBUG_FILL OFF CACHE BOOL "" FORCE)
39 set(FOONATHAN_MEMORY_DEBUG_FENCE 0 CACHE STRING "" FORCE)
40 set(FOONATHAN_MEMORY_DEBUG_LEAK_CHECK OFF CACHE BOOL "" FORCE)
41 set(FOONATHAN_MEMORY_DEBUG_POINTER_CHECK OFF CACHE BOOL "" FORCE)
42 set(FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK OFF CACHE BOOL "" FORCE)
43 else()
44 option(FOONATHAN_MEMORY_DEBUG_ASSERT
45 "whether or not internal assertions (like the macro assert) are enabled" OFF)
46 option(FOONATHAN_MEMORY_DEBUG_FILL
47 "whether or not the (de-)allocated memory will be pre-filled" OFF)
48 set(FOONATHAN_MEMORY_DEBUG_FENCE 0 CACHE STRING
49 "the amount of memory used as fence to help catching overflow errors" )
50 option(FOONATHAN_MEMORY_DEBUG_LEAK_CHECK
51 "whether or not leak checking is active" OFF)
52 option(FOONATHAN_MEMORY_DEBUG_POINTER_CHECK
53 "whether or not pointer checking on deallocation is active" OFF)
54 option(FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK
55 "whether or not the (sometimes expensive) check for double deallocation is active" OFF)
56 endif()
57
58 # other options
59 option(FOONATHAN_MEMORY_CHECK_ALLOCATION_SIZE
60 "whether or not the size of the allocation will be checked" ON)
61 set(FOONATHAN_MEMORY_DEFAULT_ALLOCATOR heap_allocator CACHE STRING
62 "the default implementation allocator for higher-level ones")
63 set(FOONATHAN_MEMORY_MEMORY_RESOURCE_HEADER "<memory_resource>" CACHE STRING
64 "the header of the memory_resource class used")
65 set(FOONATHAN_MEMORY_MEMORY_RESOURCE std::memory_resource CACHE STRING
66 "the memory_resource class used")
67 option(FOONATHAN_MEMORY_EXTERN_TEMPLATE
68 "whether or not common template instantiations are already provided by the library" ON)
69 set(FOONATHAN_MEMORY_TEMPORARY_STACK_MODE 2 CACHE STRING
70 "set to 0 to disable the per-thread stack completely, to 1 to disable the nitfy counter and to 2 to enable everything")
71 set(FOONATHAN_MEMORY_CONTAINER_NODE_SIZES_IMPL container_node_sizes_impl.hpp CACHE FILEPATH
72 "the path of the header that defines the node sizes and alignments if pre-generated.")
0 # Doxyfile 1.8.18
1
2 # This file describes the settings to be used by the documentation system
3 # doxygen (www.doxygen.org) for a project.
4 #
5 # All text after a double hash (##) is considered a comment and is placed in
6 # front of the TAG it is preceding.
7 #
8 # All text after a single hash (#) is considered a comment and will be ignored.
9 # The format is:
10 # TAG = value [value, ...]
11 # For lists, items can also be appended using:
12 # TAG += value [value, ...]
13 # Values that contain spaces should be placed between quotes (\" \").
14
15 #---------------------------------------------------------------------------
16 # Project related configuration options
17 #---------------------------------------------------------------------------
18
19 # This tag specifies the encoding used for all characters in the configuration
20 # file that follow. The default is UTF-8 which is also the encoding used for all
21 # text before the first occurrence of this tag. Doxygen uses libiconv (or the
22 # iconv built into libc) for the transcoding. See
23 # https://www.gnu.org/software/libiconv/ for the list of possible encodings.
24 # The default value is: UTF-8.
25
26 DOXYFILE_ENCODING = UTF-8
27
28 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
29 # double-quotes, unless you are using Doxywizard) that should identify the
30 # project for which the documentation is generated. This name is used in the
31 # title of most generated pages and in a few other places.
32 # The default value is: My Project.
33
34 PROJECT_NAME = memory
35
36 # The PROJECT_NUMBER tag can be used to enter a project or revision number. This
37 # could be handy for archiving the generated documentation or if some version
38 # control system is used.
39
40 PROJECT_NUMBER =
41
42 # Using the PROJECT_BRIEF tag one can provide an optional one line description
43 # for a project that appears at the top of each page and should give viewer a
44 # quick idea about the purpose of the project. Keep the description short.
45
46 PROJECT_BRIEF =
47
48 # With the PROJECT_LOGO tag one can specify a logo or an icon that is included
49 # in the documentation. The maximum height of the logo should not exceed 55
50 # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
51 # the logo to the output directory.
52
53 PROJECT_LOGO =
54
55 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
56 # into which the generated documentation will be written. If a relative path is
57 # entered, it will be relative to the location where doxygen was started. If
58 # left blank the current directory will be used.
59
60 OUTPUT_DIRECTORY = doc/
61
62 # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
63 # directories (in 2 levels) under the output directory of each output format and
64 # will distribute the generated files over these directories. Enabling this
65 # option can be useful when feeding doxygen a huge number of source files, where
66 # putting all generated files in the same directory would otherwise cause
67 # performance problems for the file system.
68 # The default value is: NO.
69
70 CREATE_SUBDIRS = NO
71
72 # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
73 # characters to appear in the names of generated files. If set to NO, non-ASCII
74 # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
75 # U+3044.
76 # The default value is: NO.
77
78 ALLOW_UNICODE_NAMES = NO
79
80 # The OUTPUT_LANGUAGE tag is used to specify the language in which all
81 # documentation generated by doxygen is written. Doxygen will use this
82 # information to generate all constant output in the proper language.
83 # Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
84 # Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
85 # Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
86 # Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
87 # Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
88 # Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
89 # Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
90 # Ukrainian and Vietnamese.
91 # The default value is: English.
92
93 OUTPUT_LANGUAGE = English
94
95 # The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
96 # documentation generated by doxygen is written. Doxygen will use this
97 # information to generate all generated output in the proper direction.
98 # Possible values are: None, LTR, RTL and Context.
99 # The default value is: None.
100
101 OUTPUT_TEXT_DIRECTION = None
102
103 # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
104 # descriptions after the members that are listed in the file and class
105 # documentation (similar to Javadoc). Set to NO to disable this.
106 # The default value is: YES.
107
108 BRIEF_MEMBER_DESC = YES
109
110 # If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
111 # description of a member or function before the detailed description
112 #
113 # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
114 # brief descriptions will be completely suppressed.
115 # The default value is: YES.
116
117 REPEAT_BRIEF = YES
118
119 # This tag implements a quasi-intelligent brief description abbreviator that is
120 # used to form the text in various listings. Each string in this list, if found
121 # as the leading text of the brief description, will be stripped from the text
122 # and the result, after processing the whole list, is used as the annotated
123 # text. Otherwise, the brief description is used as-is. If left blank, the
124 # following values are used ($name is automatically replaced with the name of
125 # the entity):The $name class, The $name widget, The $name file, is, provides,
126 # specifies, contains, represents, a, an and the.
127
128 ABBREVIATE_BRIEF = "The $name class" \
129 "The $name widget" \
130 "The $name file" \
131 is \
132 provides \
133 specifies \
134 contains \
135 represents \
136 a \
137 an \
138 the
139
140 # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
141 # doxygen will generate a detailed section even if there is only a brief
142 # description.
143 # The default value is: NO.
144
145 ALWAYS_DETAILED_SEC = YES
146
147 # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
148 # inherited members of a class in the documentation of that class as if those
149 # members were ordinary class members. Constructors, destructors and assignment
150 # operators of the base classes will not be shown.
151 # The default value is: NO.
152
153 INLINE_INHERITED_MEMB = YES
154
155 # If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
156 # before files name in the file list and in the header files. If set to NO the
157 # shortest path that makes the file name unique will be used
158 # The default value is: YES.
159
160 FULL_PATH_NAMES = YES
161
162 # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
163 # Stripping is only done if one of the specified strings matches the left-hand
164 # part of the path. The tag can be used to show relative paths in the file list.
165 # If left blank the directory from which doxygen is run is used as the path to
166 # strip.
167 #
168 # Note that you can specify absolute paths here, but also relative paths, which
169 # will be relative from the directory where doxygen is started.
170 # This tag requires that the tag FULL_PATH_NAMES is set to YES.
171
172 STRIP_FROM_PATH = include/foonathan/memory \
173 .
174
175 # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
176 # path mentioned in the documentation of a class, which tells the reader which
177 # header file to include in order to use a class. If left blank only the name of
178 # the header file containing the class definition is used. Otherwise one should
179 # specify the list of include paths that are normally passed to the compiler
180 # using the -I flag.
181
182 STRIP_FROM_INC_PATH =
183
184 # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
185 # less readable) file names. This can be useful if your file system doesn't
186 # support long names like on DOS, Mac, or CD-ROM.
187 # The default value is: NO.
188
189 SHORT_NAMES = NO
190
191 # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
192 # first line (until the first dot) of a Javadoc-style comment as the brief
193 # description. If set to NO, the Javadoc-style will behave just like regular Qt-
194 # style comments (thus requiring an explicit @brief command for a brief
195 # description.)
196 # The default value is: NO.
197
198 JAVADOC_AUTOBRIEF = YES
199
200 # If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
201 # such as
202 # /***************
203 # as being the beginning of a Javadoc-style comment "banner". If set to NO, the
204 # Javadoc-style will behave just like regular comments and it will not be
205 # interpreted by doxygen.
206 # The default value is: NO.
207
208 JAVADOC_BANNER = NO
209
210 # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
211 # line (until the first dot) of a Qt-style comment as the brief description. If
212 # set to NO, the Qt-style will behave just like regular Qt-style comments (thus
213 # requiring an explicit \brief command for a brief description.)
214 # The default value is: NO.
215
216 QT_AUTOBRIEF = NO
217
218 # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
219 # multi-line C++ special comment block (i.e. a block of //! or /// comments) as
220 # a brief description. This used to be the default behavior. The new default is
221 # to treat a multi-line C++ comment block as a detailed description. Set this
222 # tag to YES if you prefer the old behavior instead.
223 #
224 # Note that setting this tag to YES also means that rational rose comments are
225 # not recognized any more.
226 # The default value is: NO.
227
228 MULTILINE_CPP_IS_BRIEF = NO
229
230 # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
231 # documentation from any documented member that it re-implements.
232 # The default value is: YES.
233
234 INHERIT_DOCS = YES
235
236 # If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
237 # page for each member. If set to NO, the documentation of a member will be part
238 # of the file/class/namespace that contains it.
239 # The default value is: NO.
240
241 SEPARATE_MEMBER_PAGES = NO
242
243 # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
244 # uses this value to replace tabs by spaces in code fragments.
245 # Minimum value: 1, maximum value: 16, default value: 4.
246
247 TAB_SIZE = 4
248
249 # This tag can be used to specify a number of aliases that act as commands in
250 # the documentation. An alias has the form:
251 # name=value
252 # For example adding
253 # "sideeffect=@par Side Effects:\n"
254 # will allow you to put the command \sideeffect (or @sideeffect) in the
255 # documentation, which will result in a user-defined paragraph with heading
256 # "Side Effects:". You can put \n's in the value part of an alias to insert
257 # newlines (in the resulting output). You can put ^^ in the value part of an
258 # alias to insert a newline as if a physical newline was in the original file.
259 # When you need a literal { or } or , in the value part of an alias you have to
260 # escape them by means of a backslash (\), this can lead to conflicts with the
261 # commands \{ and \} for these it is advised to use the version @{ and @} or use
262 # a double escape (\\{ and \\})
263
264 ALIASES = "effects=\par <i>Effects:</i>^^" \
265 "returns=\par <i>Returns:</i>^^" \
266 "notes=\par <i>Notes:</i>^^" \
267 "throws=\par <i>Throws:</i>^^" \
268 "requires=\par <i>Requires:</i>^^" \
269 "requiredbe=\par <i>Required Behavior:</i>^^" \
270 "concept{2}=<a href=\"md_doc_concepts.html#\1\">\2</a>" \
271 "defaultbe=\par <i>Default Behavior:</i>^^"
272
273 # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
274 # only. Doxygen will then generate output that is more tailored for C. For
275 # instance, some of the names that are used will be different. The list of all
276 # members will be omitted, etc.
277 # The default value is: NO.
278
279 OPTIMIZE_OUTPUT_FOR_C = NO
280
281 # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
282 # Python sources only. Doxygen will then generate output that is more tailored
283 # for that language. For instance, namespaces will be presented as packages,
284 # qualified scopes will look different, etc.
285 # The default value is: NO.
286
287 OPTIMIZE_OUTPUT_JAVA = NO
288
289 # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
290 # sources. Doxygen will then generate output that is tailored for Fortran.
291 # The default value is: NO.
292
293 OPTIMIZE_FOR_FORTRAN = NO
294
295 # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
296 # sources. Doxygen will then generate output that is tailored for VHDL.
297 # The default value is: NO.
298
299 OPTIMIZE_OUTPUT_VHDL = NO
300
301 # Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
302 # sources only. Doxygen will then generate output that is more tailored for that
303 # language. For instance, namespaces will be presented as modules, types will be
304 # separated into more groups, etc.
305 # The default value is: NO.
306
307 OPTIMIZE_OUTPUT_SLICE = NO
308
309 # Doxygen selects the parser to use depending on the extension of the files it
310 # parses. With this tag you can assign which parser to use for a given
311 # extension. Doxygen has a built-in mapping, but you can override or extend it
312 # using this tag. The format is ext=language, where ext is a file extension, and
313 # language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
314 # Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
315 # Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
316 # FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
317 # tries to guess whether the code is fixed or free formatted code, this is the
318 # default for Fortran type files). For instance to make doxygen treat .inc files
319 # as Fortran files (default is PHP), and .f files as C (default is Fortran),
320 # use: inc=Fortran f=C.
321 #
322 # Note: For files without extension you can use no_extension as a placeholder.
323 #
324 # Note that for custom extensions you also need to set FILE_PATTERNS otherwise
325 # the files are not read by doxygen.
326
327 EXTENSION_MAPPING =
328
329 # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
330 # according to the Markdown format, which allows for more readable
331 # documentation. See https://daringfireball.net/projects/markdown/ for details.
332 # The output of markdown processing is further processed by doxygen, so you can
333 # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
334 # case of backward compatibilities issues.
335 # The default value is: YES.
336
337 MARKDOWN_SUPPORT = YES
338
339 # When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
340 # to that level are automatically included in the table of contents, even if
341 # they do not have an id attribute.
342 # Note: This feature currently applies only to Markdown headings.
343 # Minimum value: 0, maximum value: 99, default value: 5.
344 # This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
345
346 TOC_INCLUDE_HEADINGS = 5
347
348 # When enabled doxygen tries to link words that correspond to documented
349 # classes, or namespaces to their corresponding documentation. Such a link can
350 # be prevented in individual cases by putting a % sign in front of the word or
351 # globally by setting AUTOLINK_SUPPORT to NO.
352 # The default value is: YES.
353
354 AUTOLINK_SUPPORT = NO
355
356 # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
357 # to include (a tag file for) the STL sources as input, then you should set this
358 # tag to YES in order to let doxygen match functions declarations and
359 # definitions whose arguments contain STL classes (e.g. func(std::string);
360 # versus func(std::string) {}). This also makes the inheritance and collaboration
361 # diagrams that involve STL classes more complete and accurate.
362 # The default value is: NO.
363
364 BUILTIN_STL_SUPPORT = YES
365
366 # If you use Microsoft's C++/CLI language, you should set this option to YES to
367 # enable parsing support.
368 # The default value is: NO.
369
370 CPP_CLI_SUPPORT = NO
371
372 # Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
373 # https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
374 # will parse them like normal C++ but will assume all classes use public instead
375 # of private inheritance when no explicit protection keyword is present.
376 # The default value is: NO.
377
378 SIP_SUPPORT = NO
379
380 # For Microsoft's IDL there are propget and propput attributes to indicate
381 # getter and setter methods for a property. Setting this option to YES will make
382 # doxygen replace the get and set methods by a property in the documentation.
383 # This will only work if the methods are indeed getting or setting a simple
384 # type. If this is not the case, or you want to show the methods anyway, you
385 # should set this option to NO.
386 # The default value is: YES.
387
388 IDL_PROPERTY_SUPPORT = NO
389
390 # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
391 # tag is set to YES then doxygen will reuse the documentation of the first
392 # member in the group (if any) for the other members of the group. By default
393 # all members of a group must be documented explicitly.
394 # The default value is: NO.
395
396 DISTRIBUTE_GROUP_DOC = YES
397
398 # If one adds a struct or class to a group and this option is enabled, then also
399 # any nested class or struct is added to the same group. By default this option
400 # is disabled and one has to add nested compounds explicitly via \ingroup.
401 # The default value is: NO.
402
403 GROUP_NESTED_COMPOUNDS = NO
404
405 # Set the SUBGROUPING tag to YES to allow class member groups of the same type
406 # (for instance a group of public functions) to be put as a subgroup of that
407 # type (e.g. under the Public Functions section). Set it to NO to prevent
408 # subgrouping. Alternatively, this can be done per class using the
409 # \nosubgrouping command.
410 # The default value is: YES.
411
412 SUBGROUPING = YES
413
414 # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
415 # are shown inside the group in which they are included (e.g. using \ingroup)
416 # instead of on a separate page (for HTML and Man pages) or section (for LaTeX
417 # and RTF).
418 #
419 # Note that this feature does not work in combination with
420 # SEPARATE_MEMBER_PAGES.
421 # The default value is: NO.
422
423 INLINE_GROUPED_CLASSES = NO
424
425 # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
426 # with only public data fields or simple typedef fields will be shown inline in
427 # the documentation of the scope in which they are defined (i.e. file,
428 # namespace, or group documentation), provided this scope is documented. If set
429 # to NO, structs, classes, and unions are shown on a separate page (for HTML and
430 # Man pages) or section (for LaTeX and RTF).
431 # The default value is: NO.
432
433 INLINE_SIMPLE_STRUCTS = NO
434
435 # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
436 # enum is documented as struct, union, or enum with the name of the typedef. So
437 # typedef struct TypeS {} TypeT, will appear in the documentation as a struct
438 # with name TypeT. When disabled the typedef will appear as a member of a file,
439 # namespace, or class. And the struct will be named TypeS. This can typically be
440 # useful for C code in case the coding convention dictates that all compound
441 # types are typedef'ed and only the typedef is referenced, never the tag name.
442 # The default value is: NO.
443
444 TYPEDEF_HIDES_STRUCT = NO
445
446 # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
447 # cache is used to resolve symbols given their name and scope. Since this can be
448 # an expensive process and often the same symbol appears multiple times in the
449 # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
450 # doxygen will become slower. If the cache is too large, memory is wasted. The
451 # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
452 # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
453 # symbols. At the end of a run doxygen will report the cache usage and suggest
454 # the optimal cache size from a speed point of view.
455 # Minimum value: 0, maximum value: 9, default value: 0.
456
457 LOOKUP_CACHE_SIZE = 2
458
459 #---------------------------------------------------------------------------
460 # Build related configuration options
461 #---------------------------------------------------------------------------
462
463 # If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
464 # documentation are documented, even if no documentation was available. Private
465 # class members and static file members will be hidden unless the
466 # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
467 # Note: This will also disable the warnings about undocumented members that are
468 # normally produced when WARNINGS is set to YES.
469 # The default value is: NO.
470
471 EXTRACT_ALL = NO
472
473 # If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
474 # be included in the documentation.
475 # The default value is: NO.
476
477 EXTRACT_PRIVATE = NO
478
479 # If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
480 # methods of a class will be included in the documentation.
481 # The default value is: NO.
482
483 EXTRACT_PRIV_VIRTUAL = YES
484
485 # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
486 # scope will be included in the documentation.
487 # The default value is: NO.
488
489 EXTRACT_PACKAGE = NO
490
491 # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
492 # included in the documentation.
493 # The default value is: NO.
494
495 EXTRACT_STATIC = NO
496
497 # If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
498 # locally in source files will be included in the documentation. If set to NO,
499 # only classes defined in header files are included. Does not have any effect
500 # for Java sources.
501 # The default value is: YES.
502
503 EXTRACT_LOCAL_CLASSES = NO
504
505 # This flag is only useful for Objective-C code. If set to YES, local methods,
506 # which are defined in the implementation section but not in the interface are
507 # included in the documentation. If set to NO, only methods in the interface are
508 # included.
509 # The default value is: NO.
510
511 EXTRACT_LOCAL_METHODS = NO
512
513 # If this flag is set to YES, the members of anonymous namespaces will be
514 # extracted and appear in the documentation as a namespace called
515 # 'anonymous_namespace{file}', where file will be replaced with the base name of
516 # the file that contains the anonymous namespace. By default anonymous namespace
517 # are hidden.
518 # The default value is: NO.
519
520 EXTRACT_ANON_NSPACES = NO
521
522 # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
523 # undocumented members inside documented classes or files. If set to NO these
524 # members will be included in the various overviews, but no documentation
525 # section is generated. This option has no effect if EXTRACT_ALL is enabled.
526 # The default value is: NO.
527
528 HIDE_UNDOC_MEMBERS = NO
529
530 # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
531 # undocumented classes that are normally visible in the class hierarchy. If set
532 # to NO, these classes will be included in the various overviews. This option
533 # has no effect if EXTRACT_ALL is enabled.
534 # The default value is: NO.
535
536 HIDE_UNDOC_CLASSES = YES
537
538 # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
539 # declarations. If set to NO, these declarations will be included in the
540 # documentation.
541 # The default value is: NO.
542
543 HIDE_FRIEND_COMPOUNDS = YES
544
545 # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
546 # documentation blocks found inside the body of a function. If set to NO, these
547 # blocks will be appended to the function's detailed documentation block.
548 # The default value is: NO.
549
550 HIDE_IN_BODY_DOCS = NO
551
552 # The INTERNAL_DOCS tag determines if documentation that is typed after a
553 # \internal command is included. If the tag is set to NO then the documentation
554 # will be excluded. Set it to YES to include the internal documentation.
555 # The default value is: NO.
556
557 INTERNAL_DOCS = NO
558
559 # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
560 # names in lower-case letters. If set to YES, upper-case letters are also
561 # allowed. This is useful if you have classes or files whose names only differ
562 # in case and if your file system supports case sensitive file names. Windows
563 # (including Cygwin) and Mac users are advised to set this option to NO.
564 # The default value is: system dependent.
565
566 CASE_SENSE_NAMES = NO
567
568 # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
569 # their full class and namespace scopes in the documentation. If set to YES, the
570 # scope will be hidden.
571 # The default value is: NO.
572
573 HIDE_SCOPE_NAMES = YES
574
575 # If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
576 # append additional text to a page's title, such as Class Reference. If set to
577 # YES the compound reference will be hidden.
578 # The default value is: NO.
579
580 HIDE_COMPOUND_REFERENCE= NO
581
582 # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
583 # the files that are included by a file in the documentation of that file.
584 # The default value is: YES.
585
586 SHOW_INCLUDE_FILES = NO
587
588 # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
589 # grouped member an include statement to the documentation, telling the reader
590 # which file to include in order to use the member.
591 # The default value is: NO.
592
593 SHOW_GROUPED_MEMB_INC = NO
594
595 # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
596 # files with double quotes in the documentation rather than with sharp brackets.
597 # The default value is: NO.
598
599 FORCE_LOCAL_INCLUDES = NO
600
601 # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
602 # documentation for inline members.
603 # The default value is: YES.
604
605 INLINE_INFO = NO
606
607 # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
608 # (detailed) documentation of file and class members alphabetically by member
609 # name. If set to NO, the members will appear in declaration order.
610 # The default value is: YES.
611
612 SORT_MEMBER_DOCS = NO
613
614 # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
615 # descriptions of file, namespace and class members alphabetically by member
616 # name. If set to NO, the members will appear in declaration order. Note that
617 # this will also influence the order of the classes in the class list.
618 # The default value is: NO.
619
620 SORT_BRIEF_DOCS = NO
621
622 # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
623 # (brief and detailed) documentation of class members so that constructors and
624 # destructors are listed first. If set to NO the constructors will appear in the
625 # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
626 # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
627 # member documentation.
628 # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
629 # detailed member documentation.
630 # The default value is: NO.
631
632 SORT_MEMBERS_CTORS_1ST = NO
633
634 # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
635 # of group names into alphabetical order. If set to NO the group names will
636 # appear in their defined order.
637 # The default value is: NO.
638
639 SORT_GROUP_NAMES = NO
640
641 # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
642 # fully-qualified names, including namespaces. If set to NO, the class list will
643 # be sorted only by class name, not including the namespace part.
644 # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
645 # Note: This option applies only to the class list, not to the alphabetical
646 # list.
647 # The default value is: NO.
648
649 SORT_BY_SCOPE_NAME = NO
650
651 # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
652 # type resolution of all parameters of a function it will reject a match between
653 # the prototype and the implementation of a member function even if there is
654 # only one candidate or it is obvious which candidate to choose by doing a
655 # simple string match. When STRICT_PROTO_MATCHING is disabled, doxygen will
656 # still accept a match between prototype and implementation in such cases.
657 # The default value is: NO.
658
659 STRICT_PROTO_MATCHING = NO
660
661 # The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
662 # list. This list is created by putting \todo commands in the documentation.
663 # The default value is: YES.
664
665 GENERATE_TODOLIST = NO
666
667 # The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
668 # list. This list is created by putting \test commands in the documentation.
669 # The default value is: YES.
670
671 GENERATE_TESTLIST = NO
672
673 # The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
674 # list. This list is created by putting \bug commands in the documentation.
675 # The default value is: YES.
676
677 GENERATE_BUGLIST = NO
678
679 # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
680 # the deprecated list. This list is created by putting \deprecated commands in
681 # the documentation.
682 # The default value is: YES.
683
684 GENERATE_DEPRECATEDLIST= YES
685
686 # The ENABLED_SECTIONS tag can be used to enable conditional documentation
687 # sections, marked by \if <section_label> ... \endif and \cond <section_label>
688 # ... \endcond blocks.
689
690 ENABLED_SECTIONS =
691
692 # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
693 # initial value of a variable or macro / define can have for it to appear in the
694 # documentation. If the initializer consists of more lines than specified here
695 # it will be hidden. Use a value of 0 to hide initializers completely. The
696 # appearance of the value of individual variables and macros / defines can be
697 # controlled using the \showinitializer or \hideinitializer commands in the
698 # documentation regardless of this setting.
699 # Minimum value: 0, maximum value: 10000, default value: 30.
700
701 MAX_INITIALIZER_LINES = 0
702
703 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
704 # the bottom of the documentation of classes and structs. If set to YES, the
705 # list will mention the files that were used to generate the documentation.
706 # The default value is: YES.
707
708 SHOW_USED_FILES = YES
709
710 # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
711 # will remove the Files entry from the Quick Index and from the Folder Tree View
712 # (if specified).
713 # The default value is: YES.
714
715 SHOW_FILES = YES
716
717 # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
718 # page. This will remove the Namespaces entry from the Quick Index and from the
719 # Folder Tree View (if specified).
720 # The default value is: YES.
721
722 SHOW_NAMESPACES = YES
723
724 # The FILE_VERSION_FILTER tag can be used to specify a program or script that
725 # doxygen should invoke to get the current version for each file (typically from
726 # the version control system). Doxygen will invoke the program by executing (via
727 # popen()) the command <command> <input-file>, where <command> is the value of
728 # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file provided
729 # by doxygen. Whatever the program writes to standard output is used as the file
730 # version. For an example see the documentation.
731
732 FILE_VERSION_FILTER =
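# For illustration only -- this project leaves the tag empty. A hypothetical
# git-based filter that prints the hash of the last commit touching each file
# could look like:
# FILE_VERSION_FILTER = "git log -n 1 --pretty=format:%h --"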
733
734 # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
735 # by doxygen. The layout file controls the global structure of the generated
736 # output files in an output format independent way. To create the layout file
737 # that represents doxygen's defaults, run doxygen with the -l option. You can
738 # optionally specify a file name after the option, if omitted DoxygenLayout.xml
739 # will be used as the name of the layout file.
740 #
741 # Note that if you run doxygen from a directory containing a file called
742 # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
743 # tag is left empty.
744
745 LAYOUT_FILE = doc/DoxygenLayout.xml
746
747 # The CITE_BIB_FILES tag can be used to specify one or more bib files containing
748 # the reference definitions. This must be a list of .bib files. The .bib
749 # extension is automatically appended if omitted. This requires the bibtex tool
750 # to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
751 # For LaTeX the style of the bibliography can be controlled using
752 # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
753 # search path. See also \cite for info how to create references.
754
755 CITE_BIB_FILES =
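# For illustration only -- no bibliography is used here. A hypothetical
# reference file (the .bib extension may be omitted) could be added as:
# CITE_BIB_FILES = doc/references.bib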
756
757 #---------------------------------------------------------------------------
758 # Configuration options related to warning and progress messages
759 #---------------------------------------------------------------------------
760
761 # The QUIET tag can be used to turn on/off the messages that are generated to
762 # standard output by doxygen. If QUIET is set to YES this implies that the
763 # messages are off.
764 # The default value is: NO.
765
766 QUIET = YES
767
768 # The WARNINGS tag can be used to turn on/off the warning messages that are
769 # generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
770 # this implies that the warnings are on.
771 #
772 # Tip: Turn warnings on while writing the documentation.
773 # The default value is: YES.
774
775 WARNINGS = YES
776
777 # If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
778 # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
779 # will automatically be disabled.
780 # The default value is: YES.
781
782 WARN_IF_UNDOCUMENTED = NO
783
784 # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
785 # potential errors in the documentation, such as not documenting some parameters
786 # in a documented function, or documenting parameters that don't exist or using
787 # markup commands wrongly.
788 # The default value is: YES.
789
790 WARN_IF_DOC_ERROR = YES
791
792 # The WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
793 # are documented, but have no documentation for their parameters or return
794 # value. If set to NO, doxygen will only warn about wrong or incomplete
795 # parameter documentation, but not about the absence of documentation. If
796 # EXTRACT_ALL is set to YES then this flag will automatically be disabled.
797 # The default value is: NO.
798
799 WARN_NO_PARAMDOC = NO
800
801 # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
802 # a warning is encountered.
803 # The default value is: NO.
804
805 WARN_AS_ERROR = NO
806
807 # The WARN_FORMAT tag determines the format of the warning messages that doxygen
808 # can produce. The string should contain the $file, $line, and $text tags, which
809 # will be replaced by the file and line number from which the warning originated
810 # and the warning text. Optionally the format may contain $version, which will
811 # be replaced by the version of the file (if it could be obtained via
812 # FILE_VERSION_FILTER)
813 # The default value is: $file:$line: $text.
814
815 WARN_FORMAT = "$file:$line: $text"
816
817 # The WARN_LOGFILE tag can be used to specify a file to which warning and error
818 # messages should be written. If left blank the output is written to standard
819 # error (stderr).
820
821 WARN_LOGFILE =
822
823 #---------------------------------------------------------------------------
824 # Configuration options related to the input files
825 #---------------------------------------------------------------------------
826
827 # The INPUT tag is used to specify the files and/or directories that contain
828 # documented source files. You may enter file names like myfile.cpp or
829 # directories like /usr/src/myproject. Separate the files or directories with
830 # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING.
831 # Note: If this tag is empty the current directory is searched.
832
833 INPUT = .
834
835 # This tag can be used to specify the character encoding of the source files
836 # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
837 # libiconv (or the iconv built into libc) for the transcoding. See the libiconv
838 # documentation (see: https://www.gnu.org/software/libiconv/) for the list of
839 # possible encodings.
840 # The default value is: UTF-8.
841
842 INPUT_ENCODING = UTF-8
843
844 # If the value of the INPUT tag contains directories, you can use the
845 # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
846 # *.h) to filter out the source-files in the directories.
847 #
848 # Note that for custom extensions or not directly supported extensions you also
849 # need to set EXTENSION_MAPPING for the extension otherwise the files are not
850 # read by doxygen.
851 #
852 # If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
853 # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
854 # *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
855 # *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
856 # *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen
857 # C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
858 # *.vhdl, *.ucf, *.qsf and *.ice.
859
860 FILE_PATTERNS = *.hpp \
861 *.md
862
863 # The RECURSIVE tag can be used to specify whether or not subdirectories should
864 # be searched for input files as well.
865 # The default value is: NO.
866
867 RECURSIVE = YES
868
869 # The EXCLUDE tag can be used to specify files and/or directories that should be
870 # excluded from the INPUT source files. This way you can easily exclude a
871 # subdirectory from a directory tree whose root is specified with the INPUT tag.
872 #
873 # Note that relative paths are relative to the directory from which doxygen is
874 # run.
875
876 EXCLUDE = src/ \
877 include/foonathan/memory/detail/ \
878 tool/ \
879 test/ \
880 example/ \
881 cmake/ \
882 README.md
883
884 # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
885 # directories that are symbolic links (a Unix file system feature) are excluded
886 # from the input.
887 # The default value is: NO.
888
889 EXCLUDE_SYMLINKS = NO
890
891 # If the value of the INPUT tag contains directories, you can use the
892 # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
893 # certain files from those directories.
894 #
895 # Note that the wildcards are matched against the file with absolute path, so to
896 # exclude all test directories, for example, use the pattern */test/*
897
898 EXCLUDE_PATTERNS =
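# For illustration only -- this configuration relies on EXCLUDE instead. A
# hypothetical pattern-based variant of the same exclusions could look like:
# EXCLUDE_PATTERNS = */test/* */detail/*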
899
900 # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
901 # (namespaces, classes, functions, etc.) that should be excluded from the
902 # output. The symbol name can be a fully qualified name, a word, or if the
903 # wildcard * is used, a substring. Examples: ANamespace, AClass,
904 # AClass::ANamespace, ANamespace::*Test
905 #
906 # Note that the wildcards are matched against the file with absolute path, so to
907 # exclude all test directories use the pattern */test/*
908
909 EXCLUDE_SYMBOLS =
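# For illustration only -- the detail headers are already excluded via EXCLUDE.
# A hypothetical symbol-based exclusion of this library's implementation
# namespace could look like:
# EXCLUDE_SYMBOLS = foonathan::memory::detail::*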
910
911 # The EXAMPLE_PATH tag can be used to specify one or more files or directories
912 # that contain example code fragments that are included (see the \include
913 # command).
914
915 EXAMPLE_PATH =
916
917 # If the value of the EXAMPLE_PATH tag contains directories, you can use the
918 # EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
919 # *.h) to filter out the source-files in the directories. If left blank all
920 # files are included.
921
922 EXAMPLE_PATTERNS = *
923
924 # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
925 # searched for input files to be used with the \include or \dontinclude commands
926 # irrespective of the value of the RECURSIVE tag.
927 # The default value is: NO.
928
929 EXAMPLE_RECURSIVE = NO
930
931 # The IMAGE_PATH tag can be used to specify one or more files or directories
932 # that contain images that are to be included in the documentation (see the
933 # \image command).
934
935 IMAGE_PATH =
936
937 # The INPUT_FILTER tag can be used to specify a program that doxygen should
938 # invoke to filter each input file. Doxygen will invoke the filter program
939 # by executing (via popen()) the command:
940 #
941 # <filter> <input-file>
942 #
943 # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
944 # name of an input file. Doxygen will then use the output that the filter
945 # program writes to standard output. If FILTER_PATTERNS is specified, this tag
946 # will be ignored.
947 #
948 # Note that the filter must not add or remove lines; it is applied before the
949 # code is scanned, but not when the output code is generated. If lines are added
950 # or removed, the anchors will not be placed correctly.
951 #
952 # Note that for custom extensions or not directly supported extensions you also
953 # need to set EXTENSION_MAPPING for the extension otherwise the files are not
954 # properly processed by doxygen.
955
956 INPUT_FILTER =
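# For illustration only -- no filter is used here. A hypothetical filter that
# strips Windows line endings before doxygen scans each file (note that sed
# preserves the line count, as the note above requires) could look like:
# INPUT_FILTER = "sed -e 's/\r$//'"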
957
958 # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
959 # basis. Doxygen will compare the file name with each pattern and apply the
960 # filter if there is a match. The filters are a list of the form: pattern=filter
961 # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
962 # filters are used. If the FILTER_PATTERNS tag is empty or if none of the
963 # patterns match the file name, INPUT_FILTER is applied.
964 #
965 # Note that for custom extensions or not directly supported extensions you also
966 # need to set EXTENSION_MAPPING for the extension otherwise the files are not
967 # properly processed by doxygen.
968
969 FILTER_PATTERNS =
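# For illustration only -- a hypothetical per-extension filter, following the
# pattern=filter form described above, could look like:
# FILTER_PATTERNS = *.md=doc/markdown_filter.sh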
970
971 # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
972 # INPUT_FILTER) will also be used to filter the input files that are used for
973 # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
974 # The default value is: NO.
975
976 FILTER_SOURCE_FILES = NO
977
978 # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
979 # pattern. A pattern will override the setting for FILTER_PATTERNS (if any) and
980 # it is also possible to disable source filtering for a specific pattern using
981 # *.ext= (so without naming a filter).
982 # This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
983
984 FILTER_SOURCE_PATTERNS =
985
986 # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
987 # is part of the input, its contents will be placed on the main page
988 # (index.html). This can be useful if you have a project on, for instance, GitHub
989 # and want to reuse the introduction page also for the doxygen output.
990
991 USE_MDFILE_AS_MAINPAGE = doc/index.md
992
993 #---------------------------------------------------------------------------
994 # Configuration options related to source browsing
995 #---------------------------------------------------------------------------
996
997 # If the SOURCE_BROWSER tag is set to YES then a list of source files will be
998 # generated. Documented entities will be cross-referenced with these sources.
999 #
1000 # Note: To get rid of all source code in the generated output, make sure that
1001 # also VERBATIM_HEADERS is set to NO.
1002 # The default value is: NO.
1003
1004 SOURCE_BROWSER = NO
1005
1006 # Setting the INLINE_SOURCES tag to YES will include the body of functions,
1007 # classes and enums directly into the documentation.
1008 # The default value is: NO.
1009
1010 INLINE_SOURCES = NO
1011
1012 # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
1013 # special comment blocks from generated source code fragments. Normal C, C++ and
1014 # Fortran comments will always remain visible.
1015 # The default value is: YES.
1016
1017 STRIP_CODE_COMMENTS = YES
1018
1019 # If the REFERENCED_BY_RELATION tag is set to YES then for each documented
1020 # entity all documented functions referencing it will be listed.
1021 # The default value is: NO.
1022
1023 REFERENCED_BY_RELATION = NO
1024
1025 # If the REFERENCES_RELATION tag is set to YES then for each documented function
1026 # all documented entities called/used by that function will be listed.
1027 # The default value is: NO.
1028
1029 REFERENCES_RELATION = NO
1030
1031 # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
1032 # to YES then the hyperlinks from functions in REFERENCES_RELATION and
1033 # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
1034 # link to the documentation.
1035 # The default value is: YES.
1036
1037 REFERENCES_LINK_SOURCE = YES
1038
1039 # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
1040 # source code will show a tooltip with additional information such as prototype,
1041 # brief description and links to the definition and documentation. Since this
1042 # will make the HTML file larger and loading of large files a bit slower, you
1043 # can opt to disable this feature.
1044 # The default value is: YES.
1045 # This tag requires that the tag SOURCE_BROWSER is set to YES.
1046
1047 SOURCE_TOOLTIPS = YES
1048
1049 # If the USE_HTAGS tag is set to YES then the references to source code will
1050 # point to the HTML generated by the htags(1) tool instead of doxygen built-in
1051 # source browser. The htags tool is part of GNU's global source tagging system
1052 # (see https://www.gnu.org/software/global/global.html). You will need version
1053 # 4.8.6 or higher.
1054 #
1055 # To use it do the following:
1056 # - Install the latest version of global
1057 # - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
1058 # - Make sure the INPUT points to the root of the source tree
1059 # - Run doxygen as normal
1060 #
1061 # Doxygen will invoke htags (and that will in turn invoke gtags), so these
1062 # tools must be available from the command line (i.e. in the search path).
1063 #
1064 # The result: instead of the source browser generated by doxygen, the links to
1065 # source code will now point to the output of htags.
1066 # The default value is: NO.
1067 # This tag requires that the tag SOURCE_BROWSER is set to YES.
1068
1069 USE_HTAGS = NO
1070
1071 # If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
1072 # verbatim copy of the header file for each class for which an include is
1073 # specified. Set to NO to disable this.
1074 # See also: Section \class.
1075 # The default value is: YES.
1076
1077 VERBATIM_HEADERS = NO
1078
1079 #---------------------------------------------------------------------------
1080 # Configuration options related to the alphabetical class index
1081 #---------------------------------------------------------------------------
1082
1083 # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
1084 # compounds will be generated. Enable this if the project contains a lot of
1085 # classes, structs, unions or interfaces.
1086 # The default value is: YES.
1087
1088 ALPHABETICAL_INDEX = YES
1089
1090 # The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
1091 # which the alphabetical index list will be split.
1092 # Minimum value: 1, maximum value: 20, default value: 5.
1093 # This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
1094
1095 COLS_IN_ALPHA_INDEX = 5
1096
1097 # In case all classes in a project start with a common prefix, all classes will
1098 # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
1099 # can be used to specify a prefix (or a list of prefixes) that should be ignored
1100 # while generating the index headers.
1101 # This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
1102
1103 IGNORE_PREFIX =
1104
1105 #---------------------------------------------------------------------------
1106 # Configuration options related to the HTML output
1107 #---------------------------------------------------------------------------
1108
1109 # If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
1110 # The default value is: YES.
1111
1112 GENERATE_HTML = YES
1113
1114 # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
1115 # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
1116 # it.
1117 # The default directory is: html.
1118 # This tag requires that the tag GENERATE_HTML is set to YES.
1119
1120 HTML_OUTPUT = html
1121
1122 # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
1123 # generated HTML page (for example: .htm, .php, .asp).
1124 # The default value is: .html.
1125 # This tag requires that the tag GENERATE_HTML is set to YES.
1126
1127 HTML_FILE_EXTENSION = .html
1128
1129 # The HTML_HEADER tag can be used to specify a user-defined HTML header file for
1130 # each generated HTML page. If the tag is left blank doxygen will generate a
1131 # standard header.
1132 #
1133 # To get valid HTML, the header file must include any scripts and style sheets
1134 # that doxygen needs, which depend on the configuration options used (e.g.
1135 # the setting GENERATE_TREEVIEW). It is highly recommended to start with a
1136 # default header using
1137 # doxygen -w html new_header.html new_footer.html new_stylesheet.css
1138 # YourConfigFile
1139 # and then modify the file new_header.html. See also section "Doxygen usage"
1140 # for information on how to generate the default header that doxygen normally
1141 # uses.
1142 # Note: The header is subject to change so you typically have to regenerate the
1143 # default header when upgrading to a newer version of doxygen. For a description
1144 # of the possible markers and block names see the documentation.
1145 # This tag requires that the tag GENERATE_HTML is set to YES.
1146
1147 HTML_HEADER = doc/header.html
1148
1149 # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
1150 # generated HTML page. If the tag is left blank doxygen will generate a standard
1151 # footer. See HTML_HEADER for more information on how to generate a default
1152 # footer and what special commands can be used inside the footer. See also
1153 # section "Doxygen usage" for information on how to generate the default footer
1154 # that doxygen normally uses.
1155 # This tag requires that the tag GENERATE_HTML is set to YES.
1156
1157 HTML_FOOTER = doc/footer.html
1158
1159 # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
1160 # sheet that is used by each HTML page. It can be used to fine-tune the look of
1161 # the HTML output. If left blank doxygen will generate a default style sheet.
1162 # See also section "Doxygen usage" for information on how to generate the style
1163 # sheet that doxygen normally uses.
1164 # Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
1165 # it is more robust and this tag (HTML_STYLESHEET) will in the future become
1166 # obsolete.
1167 # This tag requires that the tag GENERATE_HTML is set to YES.
1168
1169 HTML_STYLESHEET =
1170
1171 # The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
1172 # cascading style sheets that are included after the standard style sheets
1173 # created by doxygen. Using this option one can overrule certain style aspects.
1174 # This is preferred over using HTML_STYLESHEET since it does not replace the
1175 # standard style sheet and is therefore more robust against future updates.
1176 # Doxygen will copy the style sheet files to the output directory.
1177 # Note: The order of the extra style sheet files is of importance (e.g. the last
1178 # style sheet in the list overrules the setting of the previous ones in the
1179 # list). For an example see the documentation.
1180 # This tag requires that the tag GENERATE_HTML is set to YES.
1181
1182 HTML_EXTRA_STYLESHEET =
1183
1184 # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
1185 # other source files which should be copied to the HTML output directory. Note
1186 # that these files will be copied to the base HTML output directory. Use the
1187 # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
1188 # files. In the HTML_STYLESHEET file, use the file name only. Also note that the
1189 # files will be copied as-is; there are no commands or markers available.
1190 # This tag requires that the tag GENERATE_HTML is set to YES.
1191
1192 HTML_EXTRA_FILES =
1193
1194 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
1195 # will adjust the colors in the style sheet and background images according to
1196 # this color. Hue is specified as an angle on a colorwheel, see
1197 # https://en.wikipedia.org/wiki/Hue for more information. For instance the value
1198 # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
1199 # purple, and 360 is red again.
1200 # Minimum value: 0, maximum value: 359, default value: 220.
1201 # This tag requires that the tag GENERATE_HTML is set to YES.
1202
1203 HTML_COLORSTYLE_HUE = 220
1204
1205 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
1206 # in the HTML output. For a value of 0 the output will use grayscales only. A
1207 # value of 255 will produce the most vivid colors.
1208 # Minimum value: 0, maximum value: 255, default value: 100.
1209 # This tag requires that the tag GENERATE_HTML is set to YES.
1210
1211 HTML_COLORSTYLE_SAT = 100
1212
1213 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
1214 # luminance component of the colors in the HTML output. Values below 100
1215 # gradually make the output lighter, whereas values above 100 make the output
1216 # darker. The value divided by 100 is the actual gamma applied, so 80 represents
1217 # a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
1218 # change the gamma.
1219 # Minimum value: 40, maximum value: 240, default value: 80.
1220 # This tag requires that the tag GENERATE_HTML is set to YES.
1221
1222 HTML_COLORSTYLE_GAMMA = 80
1223
1224 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
1225 # page will contain the date and time when the page was generated. Setting this
1226 # to YES can help to show when doxygen was last run and thus if the
1227 # documentation is up to date.
1228 # The default value is: NO.
1229 # This tag requires that the tag GENERATE_HTML is set to YES.
1230
1231 HTML_TIMESTAMP = NO
1232
1233 # If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
1234 # documentation will contain a main index with vertical navigation menus that
1235 # are dynamically created via JavaScript. If disabled, the navigation index will
1236 # consist of multiple levels of tabs that are statically embedded in every HTML
1237 # page. Disable this option to support browsers that do not have JavaScript,
1238 # like the Qt help browser.
1239 # The default value is: YES.
1240 # This tag requires that the tag GENERATE_HTML is set to YES.
1241
1242 HTML_DYNAMIC_MENUS = YES
1243
1244 # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
1245 # documentation will contain sections that can be hidden and shown after the
1246 # page has loaded.
1247 # The default value is: NO.
1248 # This tag requires that the tag GENERATE_HTML is set to YES.
1249
1250 HTML_DYNAMIC_SECTIONS = NO
1251
1252 # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
1253 # shown in the various tree structured indices initially; the user can expand
1254 # and collapse entries dynamically later on. Doxygen will expand the tree to
1255 # such a level that at most the specified number of entries are visible (unless
1256 # a fully collapsed tree already exceeds this amount). So setting the number of
1257 # entries to 1 will produce a fully collapsed tree by default. 0 is a special
1258 # value representing an infinite number of entries and will result in a fully
1259 # expanded tree by default.
1260 # Minimum value: 0, maximum value: 9999, default value: 100.
1261 # This tag requires that the tag GENERATE_HTML is set to YES.
1262
1263 HTML_INDEX_NUM_ENTRIES = 100
1264
1265 # If the GENERATE_DOCSET tag is set to YES, additional index files will be
1266 # generated that can be used as input for Apple's Xcode 3 integrated development
1267 # environment (see: https://developer.apple.com/xcode/), introduced with OSX
1268 # 10.5 (Leopard). To create a documentation set, doxygen will generate a
1269 # Makefile in the HTML output directory. Running make will produce the docset in
1270 # that directory and running make install will install the docset in
1271 # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
1272 # startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
1273 # genXcode/_index.html for more information.
1274 # The default value is: NO.
1275 # This tag requires that the tag GENERATE_HTML is set to YES.
1276
1277 GENERATE_DOCSET = NO
1278
1279 # This tag determines the name of the docset feed. A documentation feed provides
1280 # an umbrella under which multiple documentation sets from a single provider
1281 # (such as a company or product suite) can be grouped.
1282 # The default value is: Doxygen generated docs.
1283 # This tag requires that the tag GENERATE_DOCSET is set to YES.
1284
1285 DOCSET_FEEDNAME = "Doxygen generated docs"
1286
1287 # This tag specifies a string that should uniquely identify the documentation
1288 # set bundle. This should be a reverse domain-name style string, e.g.
1289 # com.mycompany.MyDocSet. Doxygen will append .docset to the name.
1290 # The default value is: org.doxygen.Project.
1291 # This tag requires that the tag GENERATE_DOCSET is set to YES.
1292
1293 DOCSET_BUNDLE_ID = org.doxygen.Project
1294
1295 # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
1296 # the documentation publisher. This should be a reverse domain-name style
1297 # string, e.g. com.mycompany.MyDocSet.documentation.
1298 # The default value is: org.doxygen.Publisher.
1299 # This tag requires that the tag GENERATE_DOCSET is set to YES.
1300
1301 DOCSET_PUBLISHER_ID = org.doxygen.Publisher
1302
1303 # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
1304 # The default value is: Publisher.
1305 # This tag requires that the tag GENERATE_DOCSET is set to YES.
1306
1307 DOCSET_PUBLISHER_NAME = Publisher
1308
1309 # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
1310 # additional HTML index files: index.hhp, index.hhc, and index.hhk. The
1311 # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
1312 # (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on
1313 # Windows.
1314 #
1315 # The HTML Help Workshop contains a compiler that can convert all HTML output
1316 # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
1317 # files have been used as the Windows help format since Windows 98, replacing
1318 # the old Windows help format (.hlp) on all Windows platforms. Compressed
1319 # HTML files also contain an index, a table of contents, and you can search for
1320 # words in the documentation. The HTML workshop also contains a viewer for
1321 # compressed HTML files.
1322 # The default value is: NO.
1323 # This tag requires that the tag GENERATE_HTML is set to YES.
1324
1325 GENERATE_HTMLHELP = NO
1326
1327 # The CHM_FILE tag can be used to specify the file name of the resulting .chm
1328 # file. You can add a path in front of the file if the result should not be
1329 # written to the html output directory.
1330 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
1331
1332 CHM_FILE =
1333
1334 # The HHC_LOCATION tag can be used to specify the location (absolute path
1335 # including file name) of the HTML help compiler (hhc.exe). If non-empty,
1336 # doxygen will try to run the HTML help compiler on the generated index.hhp.
1337 # The file has to be specified with the full path.
1338 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
1339
1340 HHC_LOCATION =
1341
1342 # The GENERATE_CHI flag controls if a separate .chi index file is generated
1343 # (YES) or whether it should be included in the master .chm file (NO).
1344 # The default value is: NO.
1345 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
1346
1347 GENERATE_CHI = NO
1348
1349 # The CHM_INDEX_ENCODING tag is used to encode the HtmlHelp index (hhk), content
1350 # (hhc) and project file content.
1351 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
1352
1353 CHM_INDEX_ENCODING =
1354
1355 # The BINARY_TOC flag controls whether a binary table of contents is generated
1356 # (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
1357 # enables the Previous and Next buttons.
1358 # The default value is: NO.
1359 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
1360
1361 BINARY_TOC = NO
1362
1363 # The TOC_EXPAND flag can be set to YES to add extra items for group members to
1364 # the table of contents of the HTML help documentation and to the tree view.
1365 # The default value is: NO.
1366 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
1367
1368 TOC_EXPAND = NO
1369
1370 # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
1371 # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
1372 # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
1373 # (.qch) of the generated HTML documentation.
1374 # The default value is: NO.
1375 # This tag requires that the tag GENERATE_HTML is set to YES.
1376
1377 GENERATE_QHP = NO
1378
1379 # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
1380 # the file name of the resulting .qch file. The path specified is relative to
1381 # the HTML output folder.
1382 # This tag requires that the tag GENERATE_QHP is set to YES.
1383
1384 QCH_FILE =
1385
1386 # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
1387 # Project output. For more information please see Qt Help Project / Namespace
1388 # (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
1389 # The default value is: org.doxygen.Project.
1390 # This tag requires that the tag GENERATE_QHP is set to YES.
1391
1392 QHP_NAMESPACE = org.doxygen.Project
1393
1394 # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
1395 # Help Project output. For more information please see Qt Help Project / Virtual
1396 # Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-
1397 # folders).
1398 # The default value is: doc.
1399 # This tag requires that the tag GENERATE_QHP is set to YES.
1400
1401 QHP_VIRTUAL_FOLDER = doc
1402
1403 # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
1404 # filter to add. For more information please see Qt Help Project / Custom
1405 # Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
1406 # filters).
1407 # This tag requires that the tag GENERATE_QHP is set to YES.
1408
1409 QHP_CUST_FILTER_NAME =
1410
1411 # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
1412 # custom filter to add. For more information please see Qt Help Project / Custom
1413 # Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
1414 # filters).
1415 # This tag requires that the tag GENERATE_QHP is set to YES.
1416
1417 QHP_CUST_FILTER_ATTRS =
1418
1419 # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
1420 # project's filter section matches. Qt Help Project / Filter Attributes (see:
1421 # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
1422 # This tag requires that the tag GENERATE_QHP is set to YES.
1423
1424 QHP_SECT_FILTER_ATTRS =
1425
1426 # The QHG_LOCATION tag can be used to specify the location of Qt's
1427 # qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
1428 # generated .qhp file.
1429 # This tag requires that the tag GENERATE_QHP is set to YES.
1430
1431 QHG_LOCATION =
1432
1433 # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
1434 # generated that, together with the HTML files, form an Eclipse help plugin. To
1435 # install this plugin and make it available under the help contents menu in
1436 # Eclipse, the contents of the directory containing the HTML and XML files needs
1437 # to be copied into the plugins directory of eclipse. The name of the directory
1438 # within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
1439 # After copying, Eclipse needs to be restarted before the help appears.
1440 # The default value is: NO.
1441 # This tag requires that the tag GENERATE_HTML is set to YES.
1442
1443 GENERATE_ECLIPSEHELP = NO
1444
1445 # A unique identifier for the Eclipse help plugin. When installing the plugin
1446 # the directory name containing the HTML and XML files should also have this
1447 # name. Each documentation set should have its own identifier.
1448 # The default value is: org.doxygen.Project.
1449 # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
1450
1451 ECLIPSE_DOC_ID = org.doxygen.Project
1452
1453 # If you want full control over the layout of the generated HTML pages it might
1454 # be necessary to disable the index and replace it with your own. The
1455 # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
1456 # of each HTML page. A value of NO enables the index and the value YES disables
1457 # it. Since the tabs in the index contain the same information as the navigation
1458 # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
1459 # The default value is: NO.
1460 # This tag requires that the tag GENERATE_HTML is set to YES.
1461
1462 DISABLE_INDEX = YES
1463
1464 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
1465 # structure should be generated to display hierarchical information. If the tag
1466 # value is set to YES, a side panel will be generated containing a tree-like
1467 # index structure (just like the one that is generated for HTML Help). For this
1468 # to work a browser that supports JavaScript, DHTML, CSS and frames is required
1469 # (i.e. any modern browser). Windows users are probably better off using the
1470 # HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
1471 # further fine-tune the look of the index. As an example, the default style
1472 # sheet generated by doxygen has an example that shows how to put an image at
1473 # the root of the tree instead of the PROJECT_NAME. Since the tree basically has
1474 # the same information as the tab index, you could consider setting
1475 # DISABLE_INDEX to YES when enabling this option.
1476 # The default value is: NO.
1477 # This tag requires that the tag GENERATE_HTML is set to YES.
1478
1479 GENERATE_TREEVIEW = NO
1480
1481 # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
1482 # doxygen will group on one line in the generated HTML documentation.
1483 #
1484 # Note that a value of 0 will completely suppress the enum values from appearing
1485 # in the overview section.
1486 # Minimum value: 0, maximum value: 20, default value: 4.
1487 # This tag requires that the tag GENERATE_HTML is set to YES.
1488
1489 ENUM_VALUES_PER_LINE = 4
1490
1491 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
1492 # to set the initial width (in pixels) of the frame in which the tree is shown.
1493 # Minimum value: 0, maximum value: 1500, default value: 250.
1494 # This tag requires that the tag GENERATE_HTML is set to YES.
1495
1496 TREEVIEW_WIDTH = 250
1497
1498 # If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
1499 # external symbols imported via tag files in a separate window.
1500 # The default value is: NO.
1501 # This tag requires that the tag GENERATE_HTML is set to YES.
1502
1503 EXT_LINKS_IN_WINDOW = NO
1504
1505 # If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
1506 # tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
1507 # https://inkscape.org) to generate formulas as SVG images instead of PNGs for
1508 # the HTML output. These images will generally look nicer at scaled resolutions.
1509 # Possible values are: png (the default) and svg (looks nicer but requires the
1510 # pdf2svg tool).
1511 # The default value is: png.
1512 # This tag requires that the tag GENERATE_HTML is set to YES.
1513
1514 HTML_FORMULA_FORMAT = png
1515
1516 # Use this tag to change the font size of LaTeX formulas included as images in
1517 # the HTML documentation. When you change the font size after a successful
1518 # doxygen run you need to manually remove any form_*.png images from the HTML
1519 # output directory to force them to be regenerated.
1520 # Minimum value: 8, maximum value: 50, default value: 10.
1521 # This tag requires that the tag GENERATE_HTML is set to YES.
1522
1523 FORMULA_FONTSIZE = 10
1524
1525 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images
1526 # generated for formulas are transparent PNGs. Transparent PNGs are not
1527 # supported properly for IE 6.0, but are supported on all modern browsers.
1528 #
1529 # Note that when changing this option you need to delete any form_*.png files in
1530 # the HTML output directory before the changes take effect.
1531 # The default value is: YES.
1532 # This tag requires that the tag GENERATE_HTML is set to YES.
1533
1534 FORMULA_TRANSPARENT = YES
1535
1536 # The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
1537 # to create new LaTeX commands to be used in formulas as building blocks. See
1538 # the section "Including formulas" for details.
1539
1540 FORMULA_MACROFILE =
1541
1542 # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
1543 # https://www.mathjax.org) which uses client side JavaScript for the rendering
1544 # instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
1545 # installed or if you want the formulas to look prettier in the HTML output. When
1546 # enabled you may also need to install MathJax separately and configure the path
1547 # to it using the MATHJAX_RELPATH option.
1548 # The default value is: NO.
1549 # This tag requires that the tag GENERATE_HTML is set to YES.
1550
1551 USE_MATHJAX = NO
1552
1553 # When MathJax is enabled you can set the default output format to be used for
1554 # the MathJax output. See the MathJax site (see:
1555 # http://docs.mathjax.org/en/latest/output.html) for more details.
1556 # Possible values are: HTML-CSS (which is slower, but has the best
1557 # compatibility), NativeMML (i.e. MathML) and SVG.
1558 # The default value is: HTML-CSS.
1559 # This tag requires that the tag USE_MATHJAX is set to YES.
1560
1561 MATHJAX_FORMAT = HTML-CSS
1562
1563 # When MathJax is enabled you need to specify the location relative to the HTML
1564 # output directory using the MATHJAX_RELPATH option. The destination directory
1565 # should contain the MathJax.js script. For instance, if the mathjax directory
1566 # is located at the same level as the HTML output directory, then
1567 # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
1568 # Content Delivery Network so you can quickly see the result without installing
1569 # MathJax. However, it is strongly recommended to install a local copy of
1570 # MathJax from https://www.mathjax.org before deployment.
1571 # The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
1572 # This tag requires that the tag USE_MATHJAX is set to YES.
1573
1574 MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2
1575
1576 # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
1577 # extension names that should be enabled during MathJax rendering. For example
1578 # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
1579 # This tag requires that the tag USE_MATHJAX is set to YES.
1580
1581 MATHJAX_EXTENSIONS =
1582
1583 # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
1584 # of code that will be used on startup of the MathJax code. See the MathJax site
1585 # (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
1586 # example see the documentation.
1587 # This tag requires that the tag USE_MATHJAX is set to YES.
1588
1589 MATHJAX_CODEFILE =
1590
1591 # When the SEARCHENGINE tag is enabled doxygen will generate a search box for
1592 # the HTML output. The underlying search engine uses javascript and DHTML and
1593 # should work on any modern browser. Note that when using HTML help
1594 # (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
1595 # there is already a search function so this one should typically be disabled.
1596 # For large projects the javascript based search engine can be slow; in that
1597 # case enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
1598 # search using the keyboard; to jump to the search box use <access key> + S
1599 # (what the <access key> is depends on the OS and browser, but it is typically
1600 # <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
1601 # key> to jump into the search results window, the results can be navigated
1602 # using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
1603 # the search. The filter options can be selected when the cursor is inside the
1604 # search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
1605 # to select a filter and <Enter> or <escape> to activate or cancel the filter
1606 # option.
1607 # The default value is: YES.
1608 # This tag requires that the tag GENERATE_HTML is set to YES.
1609
1610 SEARCHENGINE = NO
1611
1612 # When the SERVER_BASED_SEARCH tag is enabled the search engine will be
1613 # implemented using a web server instead of a web client using JavaScript. There
1614 # are two flavors of web server based searching depending on the EXTERNAL_SEARCH
1615 # setting. When disabled, doxygen will generate a PHP script for searching and
1616 # an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
1617 # and searching needs to be provided by external tools. See the section
1618 # "External Indexing and Searching" for details.
1619 # The default value is: NO.
1620 # This tag requires that the tag SEARCHENGINE is set to YES.
1621
1622 SERVER_BASED_SEARCH = NO
1623
1624 # When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
1625 # script for searching. Instead the search results are written to an XML file
1626 # which needs to be processed by an external indexer. Doxygen will invoke an
1627 # external search engine pointed to by the SEARCHENGINE_URL option to obtain the
1628 # search results.
1629 #
1630 # Doxygen ships with an example indexer (doxyindexer) and search engine
1631 # (doxysearch.cgi) which are based on the open source search engine library
1632 # Xapian (see: https://xapian.org/).
1633 #
1634 # See the section "External Indexing and Searching" for details.
1635 # The default value is: NO.
1636 # This tag requires that the tag SEARCHENGINE is set to YES.
1637
1638 EXTERNAL_SEARCH = NO
1639
1640 # The SEARCHENGINE_URL should point to a search engine hosted by a web server
1641 # which will return the search results when EXTERNAL_SEARCH is enabled.
1642 #
1643 # Doxygen ships with an example indexer (doxyindexer) and search engine
1644 # (doxysearch.cgi) which are based on the open source search engine library
1645 # Xapian (see: https://xapian.org/). See the section "External Indexing and
1646 # Searching" for details.
1647 # This tag requires that the tag SEARCHENGINE is set to YES.
1648
1649 SEARCHENGINE_URL =
1650
1651 # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
1652 # search data is written to a file for indexing by an external tool. With the
1653 # SEARCHDATA_FILE tag the name of this file can be specified.
1654 # The default file is: searchdata.xml.
1655 # This tag requires that the tag SEARCHENGINE is set to YES.
1656
1657 SEARCHDATA_FILE = searchdata.xml
1658
1659 # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
1660 # EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
1661 # useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
1662 # projects and redirect the results back to the right project.
1663 # This tag requires that the tag SEARCHENGINE is set to YES.
1664
1665 EXTERNAL_SEARCH_ID =
1666
1667 # The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
1668 # projects other than the one defined by this configuration file, but that are
1669 # all added to the same external search index. Each project needs to have a
1670 # unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
1671 # to a relative location where the documentation can be found. The format is:
1672 # EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
1673 # This tag requires that the tag SEARCHENGINE is set to YES.
1674
1675 EXTRA_SEARCH_MAPPINGS =
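# For illustration only -- external search is disabled here. A hypothetical
# mapping for two related projects, in the tagname=loc format above, could be:
# EXTRA_SEARCH_MAPPINGS = memory=../memory/html allocator=../allocator/html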
1676
1677 #---------------------------------------------------------------------------
1678 # Configuration options related to the LaTeX output
1679 #---------------------------------------------------------------------------
1680
1681 # If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
1682 # The default value is: YES.
1683
1684 GENERATE_LATEX = NO
1685
1686 # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
1687 # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
1688 # it.
1689 # The default directory is: latex.
1690 # This tag requires that the tag GENERATE_LATEX is set to YES.
1691
1692 LATEX_OUTPUT = latex
1693
1694 # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
1695 # invoked.
1696 #
1697 # Note that when USE_PDFLATEX is disabled the default is latex; when
1698 # USE_PDFLATEX is enabled the default is pdflatex, and if latex is chosen in the
1699 # latter case it is overridden by pdflatex. For specific output languages the
1700 # default may have been set differently; this depends on the implementation of
1701 # the output language.
1702 # This tag requires that the tag GENERATE_LATEX is set to YES.
1703
1704 LATEX_CMD_NAME = latex
1705
1706 # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
1707 # the index for LaTeX.
1708 # Note: This tag is used in the Makefile / make.bat.
1709 # See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
1710 # (.tex).
1711 # The default file is: makeindex.
1712 # This tag requires that the tag GENERATE_LATEX is set to YES.
1713
1714 MAKEINDEX_CMD_NAME = makeindex
1715
1716 # The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
1717 # generate the index for LaTeX. If there is no backslash (\) as the first
1718 # character, it will be added automatically in the LaTeX code.
1719 # Note: This tag is used in the generated output file (.tex).
1720 # See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
1721 # The default value is: makeindex.
1722 # This tag requires that the tag GENERATE_LATEX is set to YES.
1723
1724 LATEX_MAKEINDEX_CMD = makeindex
1725
1726 # If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
1727 # documents. This may be useful for small projects and may help to save some
1728 # trees in general.
1729 # The default value is: NO.
1730 # This tag requires that the tag GENERATE_LATEX is set to YES.
1731
1732 COMPACT_LATEX = NO
1733
1734 # The PAPER_TYPE tag can be used to set the paper type that is used by the
1735 # printer.
1736 # Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
1737 # 14 inches) and executive (7.25 x 10.5 inches).
1738 # The default value is: a4.
1739 # This tag requires that the tag GENERATE_LATEX is set to YES.
1740
1741 PAPER_TYPE = a4
1742
1743 # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
1744 # that should be included in the LaTeX output. The package can be specified just
1745 # by its name or with the correct syntax as to be used with the LaTeX
1746 # \usepackage command. To get the times font, for instance, you can specify:
1747 # EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
1748 # To use the option intlimits with the amsmath package you can specify:
1749 # EXTRA_PACKAGES=[intlimits]{amsmath}
1750 # If left blank no extra packages will be included.
1751 # This tag requires that the tag GENERATE_LATEX is set to YES.
1752
1753 EXTRA_PACKAGES =
1754
1755 # The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
1756 # generated LaTeX document. The header should contain everything until the first
1757 # chapter. If it is left blank doxygen will generate a standard header. See
1758 # section "Doxygen usage" for information on how to let doxygen write the
1759 # default header to a separate file.
1760 #
1761 # Note: Only use a user-defined header if you know what you are doing! The
1762 # following commands have a special meaning inside the header: $title,
1763 # $datetime, $date, $doxygenversion, $projectname, $projectnumber,
1764 # $projectbrief, $projectlogo. Doxygen will replace $title with the empty
1765 # string, for the replacement values of the other commands the user is referred
1766 # to HTML_HEADER.
1767 # This tag requires that the tag GENERATE_LATEX is set to YES.
1768
1769 LATEX_HEADER =
1770
1771 # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
1772 # generated LaTeX document. The footer should contain everything after the last
1773 # chapter. If it is left blank doxygen will generate a standard footer. See
1774 # LATEX_HEADER for more information on how to generate a default footer and what
1775 # special commands can be used inside the footer.
1776 #
1777 # Note: Only use a user-defined footer if you know what you are doing!
1778 # This tag requires that the tag GENERATE_LATEX is set to YES.
1779
1780 LATEX_FOOTER =
1781
1782 # The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
1783 # LaTeX style sheets that are included after the standard style sheets created
1784 # by doxygen. Using this option one can overrule certain style aspects. Doxygen
1785 # will copy the style sheet files to the output directory.
1786 # Note: The order of the extra style sheet files is of importance (e.g. the last
1787 # style sheet in the list overrules the setting of the previous ones in the
1788 # list).
1789 # This tag requires that the tag GENERATE_LATEX is set to YES.
1790
1791 LATEX_EXTRA_STYLESHEET =
1792
1793 # The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
1794 # other source files which should be copied to the LATEX_OUTPUT output
1795 # directory. Note that the files will be copied as-is; there are no commands or
1796 # markers available.
1797 # This tag requires that the tag GENERATE_LATEX is set to YES.
1798
1799 LATEX_EXTRA_FILES =
1800
1801 # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
1802 # prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
1803 # contain links (just like the HTML output) instead of page references. This
1804 # makes the output suitable for online browsing using a PDF viewer.
1805 # The default value is: YES.
1806 # This tag requires that the tag GENERATE_LATEX is set to YES.
1807
1808 PDF_HYPERLINKS = YES
1809
1810 # If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
1811 # the PDF file directly from the LaTeX files. Set this option to YES, to get a
1812 # higher quality PDF documentation.
1813 # The default value is: YES.
1814 # This tag requires that the tag GENERATE_LATEX is set to YES.
1815
1816 USE_PDFLATEX = YES
1817
1818 # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
1819 # command to the generated LaTeX files. This will instruct LaTeX to keep running
1820 # if errors occur, instead of asking the user for help. This option is also used
1821 # when generating formulas in HTML.
1822 # The default value is: NO.
1823 # This tag requires that the tag GENERATE_LATEX is set to YES.
1824
1825 LATEX_BATCHMODE = NO
1826
1827 # If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
1828 # index chapters (such as File Index, Compound Index, etc.) in the output.
1829 # The default value is: NO.
1830 # This tag requires that the tag GENERATE_LATEX is set to YES.
1831
1832 LATEX_HIDE_INDICES = NO
1833
1834 # If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
1835 # code with syntax highlighting in the LaTeX output.
1836 #
1837 # Note that which sources are shown also depends on other settings such as
1838 # SOURCE_BROWSER.
1839 # The default value is: NO.
1840 # This tag requires that the tag GENERATE_LATEX is set to YES.
1841
1842 LATEX_SOURCE_CODE = NO
1843
1844 # The LATEX_BIB_STYLE tag can be used to specify the style to use for the
1845 # bibliography, e.g. plainnat, or ieeetr. See
1846 # https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
1847 # The default value is: plain.
1848 # This tag requires that the tag GENERATE_LATEX is set to YES.
1849
1850 LATEX_BIB_STYLE = plain
1851
1852 # If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
1853 # page will contain the date and time when the page was generated. Setting this
1854 # to NO can help when comparing the output of multiple runs.
1855 # The default value is: NO.
1856 # This tag requires that the tag GENERATE_LATEX is set to YES.
1857
1858 LATEX_TIMESTAMP = NO
1859
1860 # The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
1861 # path from which the emoji images will be read. If a relative path is entered,
1862 # it will be relative to the LATEX_OUTPUT directory. If left blank the
1863 # LATEX_OUTPUT directory will be used.
1864 # This tag requires that the tag GENERATE_LATEX is set to YES.
1865
1866 LATEX_EMOJI_DIRECTORY =
1867
1868 #---------------------------------------------------------------------------
1869 # Configuration options related to the RTF output
1870 #---------------------------------------------------------------------------
1871
1872 # If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
1873 # RTF output is optimized for Word 97 and may not look too pretty with other RTF
1874 # readers/editors.
1875 # The default value is: NO.
1876
1877 GENERATE_RTF = NO
1878
1879 # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
1880 # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
1881 # it.
1882 # The default directory is: rtf.
1883 # This tag requires that the tag GENERATE_RTF is set to YES.
1884
1885 RTF_OUTPUT = rtf
1886
1887 # If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
1888 # documents. This may be useful for small projects and may help to save some
1889 # trees in general.
1890 # The default value is: NO.
1891 # This tag requires that the tag GENERATE_RTF is set to YES.
1892
1893 COMPACT_RTF = NO
1894
1895 # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
1896 # contain hyperlink fields. The RTF file will contain links (just like the HTML
1897 # output) instead of page references. This makes the output suitable for online
1898 # browsing using Word or some other Word compatible readers that support those
1899 # fields.
1900 #
1901 # Note: WordPad (write) and others do not support links.
1902 # The default value is: NO.
1903 # This tag requires that the tag GENERATE_RTF is set to YES.
1904
1905 RTF_HYPERLINKS = NO
1906
1907 # Load stylesheet definitions from file. Syntax is similar to doxygen's
1908 # configuration file, i.e. a series of assignments. You only have to provide
1909 # replacements; missing definitions are set to their default value.
1910 #
1911 # See also section "Doxygen usage" for information on how to generate the
1912 # default style sheet that doxygen normally uses.
1913 # This tag requires that the tag GENERATE_RTF is set to YES.
1914
1915 RTF_STYLESHEET_FILE =
1916
1917 # Set optional variables used in the generation of an RTF document. Syntax is
1918 # similar to doxygen's configuration file. A template extensions file can be
1919 # generated using doxygen -e rtf extensionFile.
1920 # This tag requires that the tag GENERATE_RTF is set to YES.
1921
1922 RTF_EXTENSIONS_FILE =
1923
1924 # If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
1925 # with syntax highlighting in the RTF output.
1926 #
1927 # Note that which sources are shown also depends on other settings such as
1928 # SOURCE_BROWSER.
1929 # The default value is: NO.
1930 # This tag requires that the tag GENERATE_RTF is set to YES.
1931
1932 RTF_SOURCE_CODE = NO
1933
1934 #---------------------------------------------------------------------------
1935 # Configuration options related to the man page output
1936 #---------------------------------------------------------------------------
1937
1938 # If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
1939 # classes and files.
1940 # The default value is: NO.
1941
1942 GENERATE_MAN = NO
1943
1944 # The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
1945 # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
1946 # it. A directory man3 will be created inside the directory specified by
1947 # MAN_OUTPUT.
1948 # The default directory is: man.
1949 # This tag requires that the tag GENERATE_MAN is set to YES.
1950
1951 MAN_OUTPUT = man
1952
1953 # The MAN_EXTENSION tag determines the extension that is added to the generated
1954 # man pages. In case the manual section does not start with a number, the number
1955 # 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
1956 # optional.
1957 # The default value is: .3.
1958 # This tag requires that the tag GENERATE_MAN is set to YES.
1959
1960 MAN_EXTENSION = .3
1961
1962 # The MAN_SUBDIR tag determines the name of the directory created within
1963 # MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
1964 # MAN_EXTENSION with the initial . removed.
1965 # This tag requires that the tag GENERATE_MAN is set to YES.
1966
1967 MAN_SUBDIR =
1968
1969 # If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
1970 # will generate one additional man file for each entity documented in the real
1971 # man page(s). These additional files only source the real man page, but without
1972 # them the man command would be unable to find the correct page.
1973 # The default value is: NO.
1974 # This tag requires that the tag GENERATE_MAN is set to YES.
1975
1976 MAN_LINKS = NO
1977
1978 #---------------------------------------------------------------------------
1979 # Configuration options related to the XML output
1980 #---------------------------------------------------------------------------
1981
1982 # If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
1983 # captures the structure of the code including all documentation.
1984 # The default value is: NO.
1985
1986 GENERATE_XML = NO
1987
1988 # The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
1989 # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
1990 # it.
1991 # The default directory is: xml.
1992 # This tag requires that the tag GENERATE_XML is set to YES.
1993
1994 XML_OUTPUT = xml
1995
1996 # If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
1997 # listings (including syntax highlighting and cross-referencing information) to
1998 # the XML output. Note that enabling this will significantly increase the size
1999 # of the XML output.
2000 # The default value is: YES.
2001 # This tag requires that the tag GENERATE_XML is set to YES.
2002
2003 XML_PROGRAMLISTING = YES
2004
2005 # If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
2006 # namespace members in file scope as well, matching the HTML output.
2007 # The default value is: NO.
2008 # This tag requires that the tag GENERATE_XML is set to YES.
2009
2010 XML_NS_MEMB_FILE_SCOPE = NO
2011
2012 #---------------------------------------------------------------------------
2013 # Configuration options related to the DOCBOOK output
2014 #---------------------------------------------------------------------------
2015
2016 # If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
2017 # that can be used to generate PDF.
2018 # The default value is: NO.
2019
2020 GENERATE_DOCBOOK = NO
2021
2022 # The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
2023 # If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
2024 # front of it.
2025 # The default directory is: docbook.
2026 # This tag requires that the tag GENERATE_DOCBOOK is set to YES.
2027
2028 DOCBOOK_OUTPUT = docbook
2029
2030 # If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
2031 # program listings (including syntax highlighting and cross-referencing
2032 # information) to the DOCBOOK output. Note that enabling this will significantly
2033 # increase the size of the DOCBOOK output.
2034 # The default value is: NO.
2035 # This tag requires that the tag GENERATE_DOCBOOK is set to YES.
2036
2037 DOCBOOK_PROGRAMLISTING = NO
2038
2039 #---------------------------------------------------------------------------
2040 # Configuration options for the AutoGen Definitions output
2041 #---------------------------------------------------------------------------
2042
2043 # If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
2044 # AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
2045 # the structure of the code including all documentation. Note that this feature
2046 # is still experimental and incomplete at the moment.
2047 # The default value is: NO.
2048
2049 GENERATE_AUTOGEN_DEF = NO
2050
2051 #---------------------------------------------------------------------------
2052 # Configuration options related to the Perl module output
2053 #---------------------------------------------------------------------------
2054
2055 # If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
2056 # file that captures the structure of the code including all documentation.
2057 #
2058 # Note that this feature is still experimental and incomplete at the moment.
2059 # The default value is: NO.
2060
2061 GENERATE_PERLMOD = NO
2062
2063 # If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
2064 # Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
2065 # output from the Perl module output.
2066 # The default value is: NO.
2067 # This tag requires that the tag GENERATE_PERLMOD is set to YES.
2068
2069 PERLMOD_LATEX = NO
2070
2071 # If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
2072 # formatted so it can be parsed by a human reader. This is useful if you want to
2073 # understand what is going on. On the other hand, if this tag is set to NO, the
2074 # size of the Perl module output will be much smaller and Perl will parse it
2075 # just the same.
2076 # The default value is: YES.
2077 # This tag requires that the tag GENERATE_PERLMOD is set to YES.
2078
2079 PERLMOD_PRETTY = YES
2080
2081 # The names of the make variables in the generated doxyrules.make file are
2082 # prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
2083 # so different doxyrules.make files included by the same Makefile don't
2084 # overwrite each other's variables.
2085 # This tag requires that the tag GENERATE_PERLMOD is set to YES.
2086
2087 PERLMOD_MAKEVAR_PREFIX =
2088
2089 #---------------------------------------------------------------------------
2090 # Configuration options related to the preprocessor
2091 #---------------------------------------------------------------------------
2092
2093 # If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
2094 # C-preprocessor directives found in the sources and include files.
2095 # The default value is: YES.
2096
2097 ENABLE_PREPROCESSING = YES
2098
2099 # If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
2100 # in the source code. If set to NO, only conditional compilation will be
2101 # performed. Macro expansion can be done in a controlled way by setting
2102 # EXPAND_ONLY_PREDEF to YES.
2103 # The default value is: NO.
2104 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
2105
2106 MACRO_EXPANSION = YES
2107
2108 # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
2109 # the macro expansion is limited to the macros specified with the PREDEFINED and
2110 # EXPAND_AS_DEFINED tags.
2111 # The default value is: NO.
2112 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
2113
2114 EXPAND_ONLY_PREDEF = NO
2115
2116 # If the SEARCH_INCLUDES tag is set to YES, the include files in the
2117 # INCLUDE_PATH will be searched if a #include is found.
2118 # The default value is: YES.
2119 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
2120
2121 SEARCH_INCLUDES = YES
2122
2123 # The INCLUDE_PATH tag can be used to specify one or more directories that
2124 # contain include files that are not input files but should be processed by the
2125 # preprocessor.
2126 # This tag requires that the tag SEARCH_INCLUDES is set to YES.
2127
2128 INCLUDE_PATH =
2129
2130 # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
2131 # patterns (like *.h and *.hpp) to filter out the header-files in the
2132 # directories. If left blank, the patterns specified with FILE_PATTERNS will be
2133 # used.
2134 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
2135
2136 INCLUDE_FILE_PATTERNS =
2137
2138 # The PREDEFINED tag can be used to specify one or more macro names that are
2139 # defined before the preprocessor is started (similar to the -D option of e.g.
2140 # gcc). The argument of the tag is a list of macros of the form: name or
2141 # name=definition (no spaces). If the definition and the "=" are omitted, "=1"
2142 # is assumed. To prevent a macro definition from being undefined via #undef or
2143 # recursively expanded use the := operator instead of the = operator.
2144 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
2145
2146 PREDEFINED = DOXYGEN \
2147 FOONATHAN_NOEXCEPT:=noexcept \
2148 FOONATHAN_SFINAE(x):= \
2149 FOONATHAN_REQUIRES(x):= \
2150 FOONATHAN_REQUIRES_RET(x,r):= \
2151 FOONATHAN_CONSTEXPR:=constexpr \
2152 FOONATHAN_CONSTEXPR_FNC:=constexpr \
2153 FOONATHAN_IMPL_DEFINED():=implementation_defined \
2154 FOONATHAN_EBO():=
2155
2156 # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
2157 # tag can be used to specify a list of macro names that should be expanded. The
2158 # macro definition that is found in the sources will be used. Use the PREDEFINED
2159 # tag if you want to use a different macro definition that overrules the
2160 # definition found in the source code.
2161 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
2162
2163 EXPAND_AS_DEFINED =
2164
2165 # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
2166 # remove all references to function-like macros that are alone on a line, have
2167 # an all uppercase name, and do not end with a semicolon. Such function macros
2168 # are typically used for boiler-plate code, and will confuse the parser if not
2169 # removed.
2170 # The default value is: YES.
2171 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
2172
2173 SKIP_FUNCTION_MACROS = NO
2174
2175 #---------------------------------------------------------------------------
2176 # Configuration options related to external references
2177 #---------------------------------------------------------------------------
2178
2179 # The TAGFILES tag can be used to specify one or more tag files. For each tag
2180 # file the location of the external documentation should be added. The format of
2181 # a tag file without this location is as follows:
2182 # TAGFILES = file1 file2 ...
2183 # Adding location for the tag files is done as follows:
2184 # TAGFILES = file1=loc1 "file2 = loc2" ...
2185 # where loc1 and loc2 can be relative or absolute paths or URLs. See the
2186 # section "Linking to external documentation" for more information about the use
2187 # of tag files.
2188 # Note: Each tag file must have a unique name (where the name does NOT include
2189 # the path). If a tag file is not located in the directory in which doxygen is
2190 # run, you must also specify the path to the tagfile here.
2191
2192 TAGFILES =
2193
2194 # When a file name is specified after GENERATE_TAGFILE, doxygen will create a
2195 # tag file that is based on the input files it reads. See section "Linking to
2196 # external documentation" for more information about the usage of tag files.
2197
2198 GENERATE_TAGFILE =
2199
2200 # If the ALLEXTERNALS tag is set to YES, all external classes will be listed in
2201 # the class index. If set to NO, only the inherited external classes will be
2202 # listed.
2203 # The default value is: NO.
2204
2205 ALLEXTERNALS = NO
2206
2207 # If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
2208 # in the modules index. If set to NO, only the current project's groups will be
2209 # listed.
2210 # The default value is: YES.
2211
2212 EXTERNAL_GROUPS = YES
2213
2214 # If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
2215 # the related pages index. If set to NO, only the current project's pages will
2216 # be listed.
2217 # The default value is: YES.
2218
2219 EXTERNAL_PAGES = YES
2220
2221 #---------------------------------------------------------------------------
2222 # Configuration options related to the dot tool
2223 #---------------------------------------------------------------------------
2224
2225 # If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
2226 # (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
2227 # NO turns the diagrams off. Note that this option also works with HAVE_DOT
2228 # disabled, but it is recommended to install and use dot, since it yields more
2229 # powerful graphs.
2230 # The default value is: YES.
2231
2232 CLASS_DIAGRAMS = YES
2233
2234 # You can include diagrams made with dia in doxygen documentation. Doxygen will
2235 # then run dia to produce the diagram and insert it in the documentation. The
2236 # DIA_PATH tag allows you to specify the directory where the dia binary resides.
2237 # If left empty dia is assumed to be found in the default search path.
2238
2239 DIA_PATH =
2240
2241 # If set to YES the inheritance and collaboration graphs will hide inheritance
2242 # and usage relations if the target is undocumented or is not a class.
2243 # The default value is: YES.
2244
2245 HIDE_UNDOC_RELATIONS = YES
2246
2247 # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
2248 # available from the path. This tool is part of Graphviz (see:
2249 # http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
2250 # Bell Labs. The other options in this section have no effect if this option is
2251 # set to NO.
2252 # The default value is: NO.
2253
2254 HAVE_DOT = NO
2255
2256 # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
2257 # to run in parallel. When set to 0 doxygen will base this on the number of
2258 # processors available in the system. You can set it explicitly to a value
2259 # larger than 0 to get control over the balance between CPU load and processing
2260 # speed.
2261 # Minimum value: 0, maximum value: 32, default value: 0.
2262 # This tag requires that the tag HAVE_DOT is set to YES.
2263
2264 DOT_NUM_THREADS = 0
2265
2266 # When you want a differently looking font in the dot files that doxygen
2267 # generates you can specify the font name using DOT_FONTNAME. You need to make
2268 # sure dot is able to find the font, which can be done by putting it in a
2269 # standard location or by setting the DOTFONTPATH environment variable or by
2270 # setting DOT_FONTPATH to the directory containing the font.
2271 # The default value is: Helvetica.
2272 # This tag requires that the tag HAVE_DOT is set to YES.
2273
2274 DOT_FONTNAME = Helvetica
2275
2276 # The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
2277 # dot graphs.
2278 # Minimum value: 4, maximum value: 24, default value: 10.
2279 # This tag requires that the tag HAVE_DOT is set to YES.
2280
2281 DOT_FONTSIZE = 10
2282
2283 # By default doxygen will tell dot to use the default font as specified with
2284 # DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
2285 # the path where dot can find it using this tag.
2286 # This tag requires that the tag HAVE_DOT is set to YES.
2287
2288 DOT_FONTPATH =
2289
2290 # If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
2291 # each documented class showing the direct and indirect inheritance relations.
2292 # Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
2293 # The default value is: YES.
2294 # This tag requires that the tag HAVE_DOT is set to YES.
2295
2296 CLASS_GRAPH = YES
2297
2298 # If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
2299 # graph for each documented class showing the direct and indirect implementation
2300 # dependencies (inheritance, containment, and class references variables) of the
2301 # class with other documented classes.
2302 # The default value is: YES.
2303 # This tag requires that the tag HAVE_DOT is set to YES.
2304
2305 COLLABORATION_GRAPH = YES
2306
2307 # If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
2308 # groups, showing the direct groups dependencies.
2309 # The default value is: YES.
2310 # This tag requires that the tag HAVE_DOT is set to YES.
2311
2312 GROUP_GRAPHS = YES
2313
2314 # If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
2315 # collaboration diagrams in a style similar to the OMG's Unified Modeling
2316 # Language.
2317 # The default value is: NO.
2318 # This tag requires that the tag HAVE_DOT is set to YES.
2319
2320 UML_LOOK = NO
2321
2322 # If the UML_LOOK tag is enabled, the fields and methods are shown inside the
2323 # class node. If there are many fields or methods and many nodes the graph may
2324 # become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
2325 # number of items for each type to make the size more manageable. Set this to 0
2326 # for no limit. Note that the threshold may be exceeded by 50% before the limit
2327 # is enforced. So when you set the threshold to 10, up to 15 fields may appear,
2328 # but if the number exceeds 15, the total amount of fields shown is limited to
2329 # 10.
2330 # Minimum value: 0, maximum value: 100, default value: 10.
2331 # This tag requires that the tag HAVE_DOT is set to YES.
2332
2333 UML_LIMIT_NUM_FIELDS = 10
2334
2335 # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
2336 # collaboration graphs will show the relations between templates and their
2337 # instances.
2338 # The default value is: NO.
2339 # This tag requires that the tag HAVE_DOT is set to YES.
2340
2341 TEMPLATE_RELATIONS = NO
2342
2343 # If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
2344 # YES then doxygen will generate a graph for each documented file showing the
2345 # direct and indirect include dependencies of the file with other documented
2346 # files.
2347 # The default value is: YES.
2348 # This tag requires that the tag HAVE_DOT is set to YES.
2349
2350 INCLUDE_GRAPH = YES
2351
2352 # If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
2353 # set to YES then doxygen will generate a graph for each documented file showing
2354 # the direct and indirect include dependencies of the file with other documented
2355 # files.
2356 # The default value is: YES.
2357 # This tag requires that the tag HAVE_DOT is set to YES.
2358
2359 INCLUDED_BY_GRAPH = YES
2360
2361 # If the CALL_GRAPH tag is set to YES then doxygen will generate a call
2362 # dependency graph for every global function or class method.
2363 #
2364 # Note that enabling this option will significantly increase the time of a run.
2365 # So in most cases it will be better to enable call graphs for selected
2366 # functions only using the \callgraph command. Disabling a call graph can be
2367 # accomplished by means of the command \hidecallgraph.
2368 # The default value is: NO.
2369 # This tag requires that the tag HAVE_DOT is set to YES.
2370
2371 CALL_GRAPH = NO
2372
2373 # If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
2374 # dependency graph for every global function or class method.
2375 #
2376 # Note that enabling this option will significantly increase the time of a run.
2377 # So in most cases it will be better to enable caller graphs for selected
2378 # functions only using the \callergraph command. Disabling a caller graph can be
2379 # accomplished by means of the command \hidecallergraph.
2380 # The default value is: NO.
2381 # This tag requires that the tag HAVE_DOT is set to YES.
2382
2383 CALLER_GRAPH = NO
2384
2385 # If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
2386 # graphical hierarchy of all classes instead of a textual one.
2387 # The default value is: YES.
2388 # This tag requires that the tag HAVE_DOT is set to YES.
2389
2390 GRAPHICAL_HIERARCHY = YES
2391
2392 # If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
2393 # dependencies a directory has on other directories in a graphical way. The
2394 # dependency relations are determined by the #include relations between the
2395 # files in the directories.
2396 # The default value is: YES.
2397 # This tag requires that the tag HAVE_DOT is set to YES.
2398
2399 DIRECTORY_GRAPH = YES
2400
2401 # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
2402 # generated by dot. For an explanation of the image formats see the section
2403 # output formats in the documentation of the dot tool (Graphviz (see:
2404 # http://www.graphviz.org/)).
2405 # Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
2406 # to make the SVG files visible in IE 9+ (other browsers do not have this
2407 # requirement).
2408 # Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
2409 # png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
2410 # png:gdiplus:gdiplus.
2411 # The default value is: png.
2412 # This tag requires that the tag HAVE_DOT is set to YES.
2413
2414 DOT_IMAGE_FORMAT = png
2415
2416 # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
2417 # enable generation of interactive SVG images that allow zooming and panning.
2418 #
2419 # Note that this requires a modern browser other than Internet Explorer. Tested
2420 # and working are Firefox, Chrome, Safari, and Opera.
2421 # Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
2422 # the SVG files visible. Older versions of IE do not have SVG support.
2423 # The default value is: NO.
2424 # This tag requires that the tag HAVE_DOT is set to YES.
2425
2426 INTERACTIVE_SVG = NO
2427
2428 # The DOT_PATH tag can be used to specify the path where the dot tool can be
2429 # found. If left blank, it is assumed the dot tool can be found in the path.
2430 # This tag requires that the tag HAVE_DOT is set to YES.
2431
2432 DOT_PATH =
2433
2434 # The DOTFILE_DIRS tag can be used to specify one or more directories that
2435 # contain dot files that are included in the documentation (see the \dotfile
2436 # command).
2437 # This tag requires that the tag HAVE_DOT is set to YES.
2438
2439 DOTFILE_DIRS =
2440
2441 # The MSCFILE_DIRS tag can be used to specify one or more directories that
2442 # contain msc files that are included in the documentation (see the \mscfile
2443 # command).
2444
2445 MSCFILE_DIRS =
2446
2447 # The DIAFILE_DIRS tag can be used to specify one or more directories that
2448 # contain dia files that are included in the documentation (see the \diafile
2449 # command).
2450
2451 DIAFILE_DIRS =
2452
2453 # When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
2454 # path where java can find the plantuml.jar file. If left blank, it is assumed
2455 # PlantUML is not used or called during a preprocessing step. Doxygen will
2456 # generate a warning when it encounters a \startuml command in this case and
2457 # will not generate output for the diagram.
2458
2459 PLANTUML_JAR_PATH =
2460
2461 # When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
2462 # configuration file for plantuml.
2463
2464 PLANTUML_CFG_FILE =
2465
2466 # When using plantuml, the specified paths are searched for files specified by
2467 # the !include statement in a plantuml block.
2468
2469 PLANTUML_INCLUDE_PATH =
2470
2471 # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
2472 # that will be shown in the graph. If the number of nodes in a graph becomes
2473 # larger than this value, doxygen will truncate the graph, which is visualized
2474 # by representing a node as a red box. Note that if the number of direct
2475 # children of the root node in a graph is already larger than
2476 # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
2477 # the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
2478 # Minimum value: 0, maximum value: 10000, default value: 50.
2479 # This tag requires that the tag HAVE_DOT is set to YES.
2480
2481 DOT_GRAPH_MAX_NODES = 50
2482
2483 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
2484 # generated by dot. A depth value of 3 means that only nodes reachable from the
2485 # root by following a path via at most 3 edges will be shown. Nodes that lie
2486 # further from the root node will be omitted. Note that setting this option to 1
2487 # or 2 may greatly reduce the computation time needed for large code bases. Also
2488 # note that the size of a graph can be further restricted by
2489 # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
2490 # Minimum value: 0, maximum value: 1000, default value: 0.
2491 # This tag requires that the tag HAVE_DOT is set to YES.
2492
2493 MAX_DOT_GRAPH_DEPTH = 0
2494
2495 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
2496 # background. This is disabled by default, because dot on Windows does not seem
2497 # to support this out of the box.
2498 #
2499 # Warning: Depending on the platform used, enabling this option may lead to
2500 # badly anti-aliased labels on the edges of a graph (i.e. they become hard to
2501 # read).
2502 # The default value is: NO.
2503 # This tag requires that the tag HAVE_DOT is set to YES.
2504
2505 DOT_TRANSPARENT = NO
2506
2507 # Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
2508 # files in one run (i.e. multiple -o and -T options on the command line). This
2509 # makes dot run faster, but since only newer versions of dot (>1.8.10) support
2510 # this, this feature is disabled by default.
2511 # The default value is: NO.
2512 # This tag requires that the tag HAVE_DOT is set to YES.
2513
2514 DOT_MULTI_TARGETS = NO
2515
2516 # If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
2517 # explaining the meaning of the various boxes and arrows in the dot generated
2518 # graphs.
2519 # The default value is: YES.
2520 # This tag requires that the tag HAVE_DOT is set to YES.
2521
2522 GENERATE_LEGEND = YES
2523
2524 # If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
2525 # files that are used to generate the various graphs.
2526 # The default value is: YES.
2527 # This tag requires that the tag HAVE_DOT is set to YES.
2528
2529 DOT_CLEANUP = YES
0 <doxygenlayout version="1.0">
1 <!-- Navigation index tabs for HTML output -->
2 <navindex>
3 <tab type="mainpage" visible="yes" title=""/>
4 <tab type="pages" visible="yes" title="Tutorial" intro=""/>
5 <tab type="examples" visible="yes" title="" intro=""/>
6 <tab type="user" visible="yes" title="Index" url="group__memory.html"/>
7 <tab type="filelist" visible="yes" title="Files"/>
8 </navindex>
9
10 <!-- Layout definition for a group page -->
11 <group>
12 <briefdescription visible="no"/>
13 <groupgraph visible="no"/>
14 <memberdecl>
15 <nestedgroups visible="no" title=""/>
16 <dirs visible="no" title=""/>
17 <files visible="no" title=""/>
18 <namespaces visible="no" title=""/>
19 <classes visible="yes" title=""/>
20 <defines title=""/>
21 <typedefs title=""/>
22 <enums title=""/>
23 <enumvalues title=""/>
24 <functions title=""/>
25 <variables title=""/>
26 <signals title=""/>
27 <publicslots title=""/>
28 <protectedslots title=""/>
29 <privateslots title=""/>
30 <events title=""/>
31 <properties title=""/>
32 <friends title=""/>
33 <membergroups visible="yes"/>
34 </memberdecl>
35 <detaileddescription visible="no" title=""/>
36 <memberdef>
37 <pagedocs/>
38 <inlineclasses title=""/>
39 <defines title="Defines"/>
40 <typedefs title="Typedefs"/>
41 <enums title="Enumerations"/>
42 <enumvalues title=""/>
43 <functions title="Functions"/>
44 <variables title=""/>
45 <signals title=""/>
46 <publicslots title=""/>
47 <protectedslots title=""/>
48 <privateslots title=""/>
49 <events title=""/>
50 <properties title=""/>
51 <friends title=""/>
52 </memberdef>
53 <authorsection visible="yes"/>
54 </group>
55
56 <!-- Layout definition for a class page -->
57 <class>
58 <briefdescription visible="no"/>
59 <detaileddescription title=""/>
60 <includes visible="no"/>
61 <inheritancegraph visible="no"/>
62 <collaborationgraph visible="no"/>
63 <memberdecl>
64 <nestedclasses visible="yes" title=""/>
65 <publictypes title="Typedefs"/>
66 <publicstaticattributes title="Constants"/>
67 <publicstaticmethods title="Static Functions"/>
68 <publicmethods title="Member Functions"/>
69 <related title="" subtitle=" "/>
70 <membergroups visible="yes"/>
71 </memberdecl>
72 <memberdef>
73 <inlineclasses title=""/>
74 <typedefs title="Typedefs"/>
75 <enums title="Enumerations"/>
76 <constructors title="Constructors"/>
77 <functions title="Member Functions"/>
78 <related title="Related Functions"/>
79 </memberdef>
80 <allmemberslink visible="no"/>
81 <usedfiles visible="no"/>
82 <authorsection visible="yes"/>
83 </class>
84
85 <!-- Layout definition for a file page -->
86 <file>
87 <briefdescription visible="no"/>
88 <detaileddescription title=""/>
89 <includes visible="no"/>
90 <includegraph visible="no"/>
91 <includedbygraph visible="no"/>
92 <sourcelink visible="no"/>
93 <memberdecl>
94 <defines title=""/>
95 <typedefs title=""/>
96 <enums title=""/>
97 <classes visible="yes" title=""/>
98 <variables title=""/>
99 <functions title=""/>
100 <namespaces visible="no" title=""/>
101 <constantgroups visible="yes" title=""/>
102 <membergroups visible="yes"/>
103 </memberdecl>
104 <memberdef>
105 <inlineclasses title=""/>
106 <defines title="Defines"/>
107 <typedefs title="Typedefs"/>
108 <enums title="Enumerations"/>
109 <variables title=""/>
110 <functions title="Functions"/>
111 </memberdef>
112 <authorsection/>
113 </file>
114
115 <!-- Layout definition for a namespace page -->
116 <namespace>
117 <briefdescription visible="yes"/>
118 <memberdecl>
119 <nestednamespaces visible="yes" title=""/>
120 <constantgroups visible="yes" title=""/>
121 <classes visible="yes" title=""/>
122 <typedefs title=""/>
123 <enums title=""/>
124 <functions title=""/>
125 <variables title=""/>
126 <membergroups visible="yes"/>
127 </memberdecl>
128 <detaileddescription title=""/>
129 <memberdef>
130 <inlineclasses title=""/>
131 <typedefs title=""/>
132 <enums title=""/>
133 <functions title=""/>
134 <variables title=""/>
135 </memberdef>
136 <authorsection visible="yes"/>
137 </namespace>
138
139 <!-- Layout definition for a directory page -->
140 <directory>
141 <briefdescription visible="yes"/>
142 <directorygraph visible="yes"/>
143 <memberdecl>
144 <dirs visible="yes"/>
145 <files visible="yes"/>
146 </memberdecl>
147 <detaileddescription title=""/>
148 </directory>
149 </doxygenlayout>
0 # Allocator adapters and storage classes
1
2 In addition to the various allocator classes, the library also provides a couple of adapters and storage classes for [RawAllocator] instances.
3
4 ## Allocator storage classes
5
6 Allocator storage classes are classes that store allocator objects.
7 They are all aliases for the class template [allocator_storage] with a certain [StoragePolicy].
8
9 The class simply delegates to the policy and provides the same constructors, but also the full set of [RawAllocator] member functions.
10 This allows accessing an allocator stored in an [allocator_storage] directly without using the traits.
11 A `Mutex` can also be specified that takes care of synchronization; the member function `lock()` returns a synchronized proxy object that acts like a pointer.
12
13 In addition to the following predefined policy classes, it is possible to define your own.
14 Everything is defined in the header file allocator_storage.hpp.
15
16 ### Direct allocator storage
17
18 The [StoragePolicy] [direct_storage] stores an allocator object directly as a member.
19 It can be initialized by moving in another [RawAllocator] instance.
20 Moving it also moves the allocator.
21
22 The alias [allocator_adapter] is an [allocator_storage] with this policy and no mutex.
23 It simply provides the full interface for the allocator without any additional semantics.
24 The alias [thread_safe_allocator] provides a little more semantics: it synchronizes access through a specified mutex.
25
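For illustration, a minimal sketch of the direct storage variants (hypothetical pool parameters; headers and namespace as in the tracking example later in this document):

```cpp
#include <memory/allocator_storage.hpp> // for allocator_adapter
#include <memory/memory_pool.hpp>       // for memory_pool

int main()
{
    // move a pool into the adapter, which now owns it
    memory::allocator_adapter<memory::memory_pool<>> adapter(memory::memory_pool<>(16, 1024));

    // the storage class provides the full RawAllocator interface directly
    void* node = adapter.allocate_node(16, 8);
    adapter.deallocate_node(node, 16, 8);
}
```
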
26 ### Reference allocator storage
27
28 The [StoragePolicy] [reference_storage] stores a pointer to an allocator object.
29 Although it stores a pointer, it always references an object, i.e. it is never `null`.
30
31 It provides three slightly different semantics depending on whether or not the allocator is stateful:
32
33 * For stateful allocators, it takes a reference to the allocator and stores a pointer to it.
34 It does not take ownership, i.e. the passed allocator object must live longer than the reference to it!
35
36 * For stateless allocators, it uses a `static` object in order to return a reference in `get_allocator()`.
37 This means that it does not actually depend on the lifetime of the given allocator and can also take temporaries.
38
39 * For special allocators that already provide reference semantics (determined through traits specialization), it behaves like a [direct_storage] policy.
40
41 In either case, the class is nothrow copyable and never actually moves the referenced allocator, just copies the pointer.
42 A copy of a [reference_storage] references the same allocator as the original.
43
44 The alias [allocator_reference] uses this storage policy.
45
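A sketch of the reference semantics (same assumptions as above; the pool must outlive all references to it):

```cpp
#include <memory/allocator_storage.hpp> // for allocator_reference
#include <memory/memory_pool.hpp>       // for memory_pool

int main()
{
    memory::memory_pool<> pool(16, 1024);

    // stores a pointer to pool, does not take ownership
    memory::allocator_reference<memory::memory_pool<>> ref(pool);
    auto copy = ref; // copies the pointer, both reference the same pool

    void* node = copy.allocate_node(16, 8);
    ref.deallocate_node(node, 16, 8); // same underlying allocator
}
```
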
46 ### Type-erased reference allocator storage
47
48 The [reference_storage] takes the type of the allocator being stored.
49 A specialization takes the tag type [any_allocator].
50 It provides type-erased semantics.
51
52 The constructors are templated to take any [RawAllocator] - with the same restrictions for statefulness as in the normal case -
53 and store a pointer to it using type erasure.
54
55 The accessor functions return the base class used in the type erasure, which provides the full [RawAllocator] members.
56 Note that it is not possible to get back to the actual type, i.e. call functions in the actual allocator interface.
57
58 The tag type can be used anywhere where an [allocator_reference] is used, i.e. [allocator_deleter], [std_allocator] or custom containers.
59 For convenience, the alias [any_reference_storage] simply refers to this specialization, as does the actual storage class [any_allocator_reference].
60
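A sketch of the type-erased variant (same assumptions as above):

```cpp
#include <memory/allocator_storage.hpp> // for any_allocator_reference
#include <memory/memory_pool.hpp>       // for memory_pool

int main()
{
    memory::memory_pool<> pool(16, 1024);

    // the templated constructor erases the concrete allocator type
    memory::any_allocator_reference any(pool);
    void* node = any.allocate_node(16, 8);
    any.deallocate_node(node, 16, 8);
}
```
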
61 ## std_allocator
62
63 The class [std_allocator] takes a [RawAllocator], stores it in an [allocator_reference] and provides the `Allocator` interface.
64 This allows using raw allocator objects with classes requiring the standardized concept like STL containers.
65 It takes care of allocator propagation (always propagate) and comparison, and provides the full boilerplate.
66
67 The tag type [any_allocator] can be used to enable type erasure; the alias [any_std_allocator] is exactly that.
68
69 They are defined in the header std_allocator.hpp.
70
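For illustration, a sketch plugging a pool into a standard container via [std_allocator] (assuming the `list_node_size` helper from container.hpp and the hypothetical pool parameters shown):

```cpp
#include <list>

#include <memory/container.hpp>     // for list_node_size
#include <memory/memory_pool.hpp>   // for memory_pool
#include <memory/std_allocator.hpp> // for std_allocator

int main()
{
    // a pool whose nodes are big enough for a std::list<int> node
    memory::memory_pool<> pool(memory::list_node_size<int>::value, 4096);

    // std_allocator stores an allocator_reference to the pool
    memory::std_allocator<int, memory::memory_pool<>> alloc(pool);
    std::list<int, decltype(alloc)> list(alloc);
    list.push_back(42); // allocates its node from the pool
}
```
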
71 ## Tracking
72
73 A special case of adapter is the class [tracked_allocator] defined in the header tracking.hpp.
74 It allows tracking the functions called on a [RawAllocator].
75 This is done via a [Tracker].
76
77 A `Tracker` provides functions that get called when the corresponding function on the allocator gets called.
78 For example, the function `allocate_node()` leads to a call of the tracker function `on_node_allocation()`.
79
80 This is an example of a [memory_pool] that has been tracked with a `Tracker` that logs all (de-)allocations:
81
82 ```cpp
83 #include <iostream>
84
85 #include <memory/memory_pool.hpp> // for memory_pool
86 #include <memory/tracking.hpp> // for tracked_allocator
87
88 struct log_tracker
89 {
90 void on_node_allocation(void *mem, std::size_t size, std::size_t) noexcept
91 {
92 std::clog << this << " node allocated: ";
93 std::clog << mem << " (" << size << ") " << '\n';
94 }
95
96 void on_array_allocation(void *mem, std::size_t count, std::size_t size, std::size_t) noexcept
97 {
98 std::clog << this << " array allocated: ";
99 std::clog << mem << " (" << count << " * " << size << ") " << '\n';
100 }
101
102 void on_node_deallocation(void *ptr, std::size_t, std::size_t) noexcept
103 {
104 std::clog << this << " node deallocated: " << ptr << " \n";
105 }
106
107 void on_array_deallocation(void *ptr, std::size_t, std::size_t, std::size_t) noexcept
108 {
109 std::clog << this << " array deallocated: " << ptr << " \n";
110 }
111 };
112
113 int main()
114 {
115 auto tracked_pool = memory::make_tracked_allocator(log_tracker{}, memory::memory_pool<>(16, 1024));
116 // go on using the tracked_pool
117 }
118 ```
119
120 The `log_tracker` above uses the address of the tracker object to identify a certain allocator in the output;
121 this is completely legal.
122 Note that the function `make_tracked_allocator()`, which returns the appropriate [tracked_allocator], takes ownership of the pool;
123 you can either pass a temporary as shown or move in an existing pool.
124 The result `tracked_pool` provides the full [RawAllocator] interface and can be used as usual,
125 except that all (de-)allocations are logged.
126
127 ## Other adapters
128
129 ### aligned_allocator
130
131 The allocator adapter [aligned_allocator] wraps a [RawAllocator] and ensures a certain minimum alignment in all (de)allocation functions.
132
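A sketch, assuming the convenience function `make_aligned_allocator()` takes the minimum alignment followed by the allocator to wrap:

```cpp
#include <memory/aligned_allocator.hpp> // for aligned_allocator, make_aligned_allocator
#include <memory/heap_allocator.hpp>    // for heap_allocator

int main()
{
    // every allocation will be aligned to at least 32 bytes
    auto alloc = memory::make_aligned_allocator(32, memory::heap_allocator{});
    void* node = alloc.allocate_node(64, 1); // actually aligned to 32
    alloc.deallocate_node(node, 64, 1);
}
```
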
133 [any_allocator]: \ref foonathan::memory::any_allocator
134 [allocator_storage]: \ref foonathan::memory::allocator_storage
135 [allocator_deleter]: \ref foonathan::memory::allocator_deleter
136 [allocator_adapter]: \ref foonathan::memory::allocator_adapter
137 [allocator_reference]: \ref foonathan::memory::allocator_reference
138 [any_allocator_reference]: \ref foonathan::memory::any_allocator_reference
139 [thread_safe_allocator]: \ref foonathan::memory::thread_safe_allocator
140 [direct_storage]: \ref foonathan::memory::direct_storage
141 [reference_storage]: \ref foonathan::memory::reference_storage
142 [any_reference_storage]: \ref foonathan::memory::any_reference_storage
143 [std_allocator]: \ref foonathan::memory::std_allocator
144 [any_std_allocator]: \ref foonathan::memory::any_std_allocator
145 [aligned_allocator]: \ref foonathan::memory::aligned_allocator
146 [memory_pool]: \ref foonathan::memory::memory_pool
147 [RawAllocator]: md_doc_concepts.html#concept_rawallocator
148 [StoragePolicy]: md_doc_concepts.html#concept_storagepolicy
149 [Tracker]: md_doc_concepts.html#concept_tracker
0 # Concepts and overall requirements
1
2 ## Node
3 <a name="concept_node"></a>
4
5 A *node* is the region of storage needed to hold a single object.
6 This storage region is identified via a pointer which is the *address* of the node.
7 The terms *node* and *address of the node* are used interchangeably.
8
9 It can be described using two properties: a *size* and an *alignment*.
10 Both are represented as values of type `std::size_t`.
11 The *alignment* must be a power of two.
12 It describes the alignment requirement of the storage region,
13 i.e. the address is divisible by the alignment.
14 The *size* must be any valid value of `std::size_t` except `0`.
15 It describes the size of the storage region,
16 `size` bytes after the address are available for the node.
17
18 The following requirements must be fulfilled to create an object of type `T` in a node,
19 i.e. to call a placement new `new(address) T(ctor-args)`:
20
21 * The alignment of the node must be at least as big as the alignment of the type, returned by `alignof(T)`.
22 If it is bigger, the type is *over-aligned* in this node, otherwise, it is *normal-aligned*.
23
24 * The size of the node must be at least as big as the size of the type, returned by `sizeof(T)`.
25
26 *Example:* A node returned by a call to `allocate_node(sizeof(T), alignof(T))` of a [RawAllocator](#concept_rawallocator)
27 always fulfills these requirements for the type `T`.
28
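For illustration, a minimal sketch of this life cycle in terms of the traits interface described in the [RawAllocator](#concept_rawallocator) section below (`create_node`/`destroy_node` are hypothetical helpers, not part of the library; no exception safety, for brevity):

```cpp
#include <new>     // for placement new
#include <utility> // for std::forward

#include <memory/allocator_traits.hpp> // for allocator_traits

// create an object of type T in a node allocated through the traits
template <class T, class RawAllocator, class... Args>
T* create_node(RawAllocator& alloc, Args&&... args)
{
    using traits = memory::allocator_traits<RawAllocator>;
    void* node   = traits::allocate_node(alloc, sizeof(T), alignof(T));
    return ::new (node) T(std::forward<Args>(args)...);
}

// destroy the object and give the node back to the allocator
template <class T, class RawAllocator>
void destroy_node(RawAllocator& alloc, T* obj) noexcept
{
    using traits = memory::allocator_traits<RawAllocator>;
    obj->~T();
    traits::deallocate_node(alloc, obj, sizeof(T), alignof(T));
}
```
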
29 ## Array of nodes
30 <a name="concept_array"></a>
31
32 An *array of nodes* is a sequence of nodes whose storage regions are consecutive in memory.
33 The *address* of the array is simply the address of the first node in the array.
34 The terms *array* and *address of the array* are used interchangeably.
35
36 In addition to the size and alignment, it has an additional property, the *count*.
37 The *count* is the number of nodes and must be a valid value of type `std::size_t` except `0`.
38 The *size* is the size of each node and has the same requirements as for a node.
39 The *alignment* is the alignment of the first node in the array and has the same requirements as for a node.
40
41 The `i`th node of the array is at position `address + i * size`.
42 The total memory occupied by an array is thus `count * size`.
43 The size thus implicitly specifies the alignment of each node except the first, through its position.
44
45 The following requirements must be fulfilled to create an array of `n` `T` objects in an array of nodes,
46 i.e. to create an object in each node:
47
48 * The count of nodes must be at least `n`.
49
50 * The alignment of the array must be at least `alignof(T)`.
51
52 * The size of each node must be at least `sizeof(T)` and a multiple of `alignof(T)`.
53 This is required to ensure proper alignment of each node in the array.
54 *Note:* To create over-aligned types in the nodes, the alignment must be the stricter alignment
55 and the size must be a multiple of the stricter alignment.
56
57 *Example:* An array of nodes returned by a call to `allocate_array(n, sizeof(T), alignof(T))` of a [RawAllocator](#concept_rawallocator)
58 always fulfills these requirements for the type `T`.
59 A call of the form `allocate_array(n, size, align)` where `align` is a stricter alignment
60 and `size` is a multiple of `align` bigger than `sizeof(T)` returns an array of nodes
61 where each node is over-aligned.
62
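A sketch of constructing `n` objects in such an array (a hypothetical helper, same assumptions as above; note that `sizeof(T)` being a multiple of `alignof(T)` keeps every node properly aligned):

```cpp
#include <cstddef> // for std::size_t
#include <new>     // for placement new

#include <memory/allocator_traits.hpp> // for allocator_traits

// value-construct n objects of type T in an array of nodes
// (no exception safety, for brevity)
template <class T, class RawAllocator>
T* create_array(RawAllocator& alloc, std::size_t n)
{
    using traits = memory::allocator_traits<RawAllocator>;
    void* array  = traits::allocate_array(alloc, n, sizeof(T), alignof(T));
    for (std::size_t i = 0u; i != n; ++i)
        ::new (static_cast<char*>(array) + i * sizeof(T)) T(); // i-th node at address + i * size
    return static_cast<T*>(array);
}
```
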
63 ## RawAllocator
64 <a name="concept_rawallocator"></a>
65
66 A `RawAllocator` is the new type of allocator used in this library.
67 Unlike the `Allocator`, it does not work on a certain type directly,
68 but only in terms of nodes and arrays of nodes.
69 Thus it is unable to specify things like pointer types or construction functions.
70 It is only responsible for allocating and deallocating memory for nodes.
71
72 A `RawAllocator` can either be *stateful* or *stateless*.
73 A stateful allocator has some state, i.e. member variables, that need to be stored across calls.
74 A stateless allocator can be constructed on-the-fly for each member function call.
75 A pointer allocated by one instance of a stateless allocator can be deallocated with any other instance.
76 All member functions are assumed to be thread-safe and can be called without synchronization.
77 This is not valid for stateful allocators.
78 Most of the time stateless allocators are also empty types, although this is not required
79 (*note:* it does not make much sense for them to be non-empty, since the values of the member variables are not required to be the same).
80 An additional requirement for stateless allocators is that they have a default constructor.
81
82 Access to a `RawAllocator` is only done via the class [allocator_traits].
83 It can be specialized for your own `RawAllocator` types.
84 The requirements for such a specialization are shown in the following table,
85 where `traits` is `allocator_traits<RawAllocator>`, `alloc` is an instance of type `traits::allocator_type`,
86 `calloc` is a `const` `alloc`, `size` is a valid node size, `alignment` is a valid alignment, `count` is a valid array count,
87 `node` is a [node](#concept_node) returned by `traits::allocate_node` and `array` is an [array](#concept_array) returned by `traits::allocate_array`:
88
89 Expression|Return Type|Throws|Description
90 ----------|-----------|------|-----------
91 `traits::allocator_type` | `RawAllocator` (most of the time) | - (typedef) | *Note*: The default specialization uses this typedef to rebind a standard `Allocator` to `char`, to be able to use it to allocate single bytes. In most other cases, it should be the same type as the template parameter; if not, it must be implicitly convertible. Be aware that this is the type actually being stored and passed to all other functions.
92 `traits::is_stateful` | `std::true_type` or `std::false_type` or inherited | - (typedef) | Describes whether or not an allocator is stateful.
93 `traits::allocate_node(alloc, size, alignment)` | `void*` | `std::bad_alloc` or derived | Allocates a [node](#concept_node) and returns its address. Must not return `nullptr`.
94 `traits::allocate_array(alloc, count, size, alignment)` | `void*` | `std::bad_alloc` or derived | Allocates an [array](#concept_array) and returns its address. Must not return `nullptr`.
95 `traits::deallocate_node(alloc, node, size, alignment)` | `void` | must not throw | Deallocates a [node](#concept_node). `alloc`, `size` and `alignment` must be the same as in the allocation.
96 `traits::deallocate_array(alloc, array, count, size, alignment)` | `void` | must not throw | Deallocates an [array](#concept_array). `alloc`, `count`, `size` and `alignment` must be the same as in the allocation.
97 `traits::max_node_size(calloc)` | `std::size_t` | can throw anything, but should throw nothing | Returns the maximum size for a [node](#concept_node), i.e. the maximum value allowed as `size`. *Note:* Only an upper-bound value, actual maximum might be less.
98 `traits::max_array_size(calloc)` | `std::size_t` | can throw anything, but should throw nothing | Returns the maximum *raw* size for an [array](#concept_array), i.e. the maximum value allowed for `count * size`. *Note:* Only an upper-bound value, actual maximum might be less.
99 `traits::max_alignment(calloc)` | `std::size_t` | can throw anything, but should throw nothing | Returns the maximum supported alignment, i.e. the maximum value allowed for `alignment`. Must be at least `alignof(std::max_align_t)`.
100
101 The typedef `traits::allocator_type` is the actual *state* type of the allocator.
102 This is the type being stored and passed to all functions.
103 It must be implicitly convertible from a `RawAllocator`, provide move operations that do not throw
104 and must be a valid base class, i.e. not a built-in type or marked `final`, but does not need virtual functions.
105
106 The two allocation functions must never return a `nullptr`. If the allocation was unsuccessful,
107 they can either throw an exception derived from `std::bad_alloc` or terminate the program (not recommended).
108 They must be prepared to handle sizes or alignments bigger than the values returned by `max_*`, i.e. by throwing an exception.
109
110 Moving a stateful `RawAllocator` moves the ownership over the allocated memory, too.
111 That means that after a move, memory allocated by the old allocator must be freed by the new one,
112 not by the old one.
113 But a moved-from allocator must still be usable for further memory allocations.
114 For stateless allocators this is not required, since any instance must be able to deallocate memory allocated by any other instance.
115
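For illustration, a skeleton of such a specialization for a hypothetical `my_allocator` type (member signatures mirror the table above; a sketch, not library code):

```cpp
#include <cstddef>     // for std::size_t
#include <type_traits> // for std::true_type

#include <memory/allocator_traits.hpp> // for allocator_traits

struct my_allocator; // hypothetical allocator type

namespace foonathan
{
    namespace memory
    {
        template <>
        struct allocator_traits<my_allocator>
        {
            using allocator_type = my_allocator;
            using is_stateful    = std::true_type;

            static void* allocate_node(allocator_type& alloc, std::size_t size,
                                       std::size_t alignment);
            static void* allocate_array(allocator_type& alloc, std::size_t count,
                                        std::size_t size, std::size_t alignment);

            static void deallocate_node(allocator_type& alloc, void* node, std::size_t size,
                                        std::size_t alignment) noexcept;
            static void deallocate_array(allocator_type& alloc, void* array, std::size_t count,
                                         std::size_t size, std::size_t alignment) noexcept;

            static std::size_t max_node_size(const allocator_type& alloc);
            static std::size_t max_array_size(const allocator_type& alloc);
            static std::size_t max_alignment(const allocator_type& alloc);
        };
    } // namespace memory
} // namespace foonathan
```
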
116 To allow for an easier use, the default specialization of the [allocator_traits]
117 forwards to the appropriate member functions or uses the specified fallback:
118
119 Expression|RawAllocator|Fallback
120 ----------|------------|--------
121 `traits::allocator_type` | `RawAllocator` | see below
122 `traits::is_stateful` | `RawAllocator::is_stateful` | empty types will be stateless and non-empty types stateful
123 `traits::allocate_node(alloc, size, alignment)` | `alloc.allocate_node(size, alignment)` | see below
124 `traits::allocate_array(alloc, count, size, alignment)` | `alloc.allocate_array(count, size, alignment)` | `traits::allocate_node(alloc, count * size, alignment)`
125 `traits::deallocate_node(alloc, node, size, alignment)` | `alloc.deallocate_node(node, size, alignment)` | see below
126 `traits::deallocate_array(alloc, array, count, size, alignment)` | `alloc.deallocate_array(array, count, size, alignment)` | `traits::deallocate_node(alloc, array, count * size, alignment)`
127 `traits::max_node_size(calloc)` | `calloc.max_node_size()` | maximum value of type `std::size_t`
128 `traits::max_array_size(calloc)` | `calloc.max_array_size()` | `traits::max_node_size(calloc)`
129 `traits::max_alignment(calloc)` | `calloc.max_alignment()` | `alignof(std::max_align_t)`
130
131 To allow rebinding required for traditional `Allocator`s, there is an additional behavior when selecting the fallback.
132 If the parameter of the `allocator_traits` contains a typedef `value_type`, `traits::allocator_type` will rebind the type to `char`.
133 This is done in the same way `std::allocator_traits` does it, i.e. first try to access the `rebind` member struct,
134 then a type `alloc<T, Args...>` will be rebound to `alloc<char, Args...>`.
135 If the parameter does not provide a member function `allocate_node`, the traits will try to call the allocation function required by the `Allocator` concept,
136 i.e. `static_cast<void*>(alloc.allocate(size))`; likewise `deallocate_node` will forward to the deallocation function `alloc.deallocate(static_cast<char*>(node), size)`.
137
138 This enables the usage of any type modelling the `Allocator` concept where a `RawAllocator` is expected.
139 It is only enabled, however, if the `Allocator` does not provide custom `construct()`/`destroy()` functions, since they would never be called.
140 The checking can be overridden by specializing the traits class [allocator_is_raw_allocator](\ref foonathan::memory::allocator_is_raw_allocator).
141 Note that it does *not* use `std::allocator_traits` but calls the functions directly, enabling only those `Allocator` classes that have not specialized the traits template.
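
For example, `std::allocator<int>` models the `Allocator` concept and can thus be used wherever a `RawAllocator` is expected. A minimal sketch of what the default traits then do (the includes assume the `foonathan/memory/` paths used in the other examples of this documentation):

```cpp
#include <memory> // std::allocator

#include <foonathan/memory/allocator_traits.hpp> // allocator_traits
#include <foonathan/memory/namespace_alias.hpp>  // namespace alias memory

using traits = memory::allocator_traits<std::allocator<int>>;

void example()
{
    // allocator_type rebinds std::allocator<int> to std::allocator<char>
    traits::allocator_type alloc;

    // forwards to alloc.allocate(sizeof(int)), as there is no allocate_node() member
    void* node = traits::allocate_node(alloc, sizeof(int), alignof(int));
    // forwards to alloc.deallocate(static_cast<char*>(node), sizeof(int))
    traits::deallocate_node(alloc, node, sizeof(int), alignof(int));
}
```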
142
143 For exposition, this is the minimum required interface for a `RawAllocator` without an appropriate specialization:
144
145 ```cpp
146 struct min_raw_allocator
147 {
148 min_raw_allocator(min_raw_allocator&&) noexcept;
149 ~min_raw_allocator() noexcept;
150 min_raw_allocator& operator=(min_raw_allocator&&) noexcept;
151
152 void* allocate_node(std::size_t size, std::size_t alignment);
153 void deallocate_node(void *node, std::size_t size, std::size_t alignment) noexcept;
154 };
155 ```
156
157 *Note*: If a RawAllocator provides member functions for allocation/deallocation, it is not allowed to mix those two interfaces,
158 i.e. to allocate memory through the traits and deallocate it through the member function or vice versa.
159 The two interfaces are completely allowed to do different things.
160
161 ### Composable RawAllocator
162
163 A RawAllocator can be *composable*.
164 Access to the composable (de)allocation functions is only done through the [composable_allocator_traits].
165 It can be specialized for your own allocator types.
166 The requirements for such a specialization are shown in the following table,
167 where `ctraits` is `composable_allocator_traits<RawAllocator>`, `alloc` is an instance of type `traits::allocator_type`, `size` is a valid node size, `alignment` is a valid alignment, `count` is a valid array count,
168 `node` is any non-null [node](#concept_node) and `array` is any non-null [array](#concept_array):
169
170 Expression | Return Type | Description
171 -----------|-------------|------------
172 `ctraits::allocator_type` | `allocator_traits<RawAllocator>::allocator_type` | just forwards to the regular traits
173 `ctraits::try_allocate_node(alloc, size, alignment)` | `void*` | Similar to the `allocate_node()` function but returns `nullptr` on failure instead of throwing an exception.
174 `ctraits::try_allocate_array(alloc, count, size, alignment)` | `void*` | Similar to the `allocate_array()` function but returns `nullptr` on failure instead of throwing an exception.
175 `ctraits::try_deallocate_node(alloc, node, size, alignment)` | `bool` | Similar to the `deallocate_node()` function but can be called with *any* [node](#concept_node). If that node was allocated by `alloc`, it will be deallocated and the function returns `true`. Otherwise the function has no effect and returns `false`.
176 `ctraits::try_deallocate_array(alloc, array, count, size, alignment)` | `bool` | Similar to the `deallocate_array()` function but can be called with *any* [array](#concept_array). If that array was allocated by `alloc`, it will be deallocated and the function returns `true`. Otherwise the function has no effect and returns `false`.
177
178 Unlike the normal allocation functions, the composable allocation functions are allowed to return `nullptr` on failure,
179 but they must never throw an exception.
180 The deallocation functions can be called with arbitrary nodes/arrays.
181 The allocator must be able to detect whether they were originally allocated by it and only deallocate them if that is the case.
182 You are not allowed to mix the composable and normal allocation functions.
183
184 Like [allocator_traits] the default [composable_allocator_traits] specialization forwards to member functions or uses a fallback:
185
186
187 Expression|RawAllocator|Fallback
188 ----------|------------|--------
189 `ctraits::allocator_type` | - | `allocator_traits<RawAllocator>::allocator_type`
190 `ctraits::try_allocate_node(alloc, size, alignment)` | `alloc.try_allocate_node(size, alignment)` | none, required
191 `ctraits::try_allocate_array(alloc, count, size, alignment)` | `alloc.try_allocate_array(count, size, alignment)` | `ctraits::try_allocate_node(alloc, count * size, alignment)`
192 `ctraits::try_deallocate_node(alloc, node, size, alignment)` | `alloc.try_deallocate_node(node, size, alignment)` | none, required
193 `ctraits::try_deallocate_array(alloc, array, count, size, alignment)` | `alloc.try_deallocate_array(array, count, size, alignment)` | `ctraits::try_deallocate_node(alloc, array, count * size, alignment)`
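
For exposition, this is the minimal additional interface a composable `RawAllocator` has to provide if there is no appropriate specialization; a sketch extending the `min_raw_allocator` from above:

```cpp
struct min_composable_allocator : min_raw_allocator
{
    // returns nullptr on failure instead of throwing an exception
    void* try_allocate_node(std::size_t size, std::size_t alignment) noexcept;

    // deallocates the node if it was allocated by this allocator and returns true,
    // otherwise has no effect and returns false
    bool try_deallocate_node(void* node, std::size_t size, std::size_t alignment) noexcept;
};
```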
194
195 ## BlockAllocator
196 <a name="concept_blockallocator"></a>
197
198 Some allocator types manage huge memory blocks and return parts of them in their allocation functions.
199 Such huge memory blocks are managed by a memory arena, implemented in the class [memory_arena].
200
201 The size and the allocation of the memory blocks is controlled by a `BlockAllocator`.
202 It is responsible for allocating and deallocating those blocks. It must be nothrow moveable and a valid base class, i.e. not `final`. In addition, it must provide the following:
203
204 Expression|Semantics
205 ----------|---------
206 `BlockAllocator(block_size, args)`|Creates a `BlockAllocator` by giving it a non-zero initial block size and optionally multiple further arguments.
207 `alloc.allocate_block()`|Returns a new [memory_block] object that is the next memory block.
208 `alloc.deallocate_block(block)`|Deallocates a `memory_block`. Deallocation will be done in reverse order.
209 `calloc.next_block_size()`|Returns the size of the `memory_block` in the next allocation.
210
211 The alignment of the allocated memory blocks must be the maximum alignment.
212
213 This is a sample `BlockAllocator` that uses `new` for the allocation:
214
215 ```cpp
216 class block_allocator
217 {
218 public:
219 block_allocator(std::size_t block_size)
220 : block_size_(block_size) {}
221
222 memory_block allocate_block()
223 {
224 auto mem = ::operator new(block_size_);
225 return {mem, block_size_};
226 }
227
228 void deallocate_block(memory_block b)
229 {
230 ::operator delete(b.memory);
231 }
232
233 std::size_t next_block_size() const
234 {
235 return block_size_;
236 }
237
238 private:
239 std::size_t block_size_;
240 };
241 ```
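
Such a `BlockAllocator` can then be plugged into [memory_arena]; a usage sketch (assuming that the arena forwards its remaining constructor arguments to the `BlockAllocator` and provides `allocate_block()`/`deallocate_block()` itself):

```cpp
#include <foonathan/memory/memory_arena.hpp>    // memory_arena
#include <foonathan/memory/namespace_alias.hpp> // namespace alias memory

void example()
{
    // the constructor argument is forwarded to block_allocator(4096)
    memory::memory_arena<block_allocator> arena(4096);

    auto block = arena.allocate_block(); // calls block_allocator::allocate_block()
    // ... use block.memory and block.size ...
    arena.deallocate_block();            // deallocates the most recent block
}
```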
242
243 ## StoragePolicy
244 <a name="concept_storagepolicy"></a>
245
246 A `StoragePolicy` stores a [RawAllocator](#concept_rawallocator) and is used with the class template [allocator_storage].
247 It specifies how the allocator is stored, i.e. whether it is stored directly or only a pointer to it.
248 It must always store a `RawAllocator` instance.
249 The `StoragePolicy` must be a class that is nothrow moveable and a valid base class, i.e. not `final`.
250
251 In addition it must provide the following:
252
253 Expression|Semantics
254 ----------|---------
255 `StoragePolicy::allocator_type` | The type of the allocator being stored, as determined through the [allocator_traits]. For a type-erased storage, it can be the type-erased base class.
256 `StoragePolicy(args)` | Creates the `StoragePolicy`. `args` can be anything. It is used to create the allocator.
257 `policy.get_allocator()` | Returns a reference to the `allocator_type`. Must not throw. May return a `const` reference, if `policy` is `const`.
258 `policy.is_composable()` | Returns whether or not the `allocator_type` is a [ComposableAllocator](#concept_composableallocator).
259
260 For exposition, this is a sample `StoragePolicy`.
261 Note that it is not required to be a template, although it does not make much sense otherwise.
262
263 ```cpp
264 template <class RawAllocator>
265 class storage_policy
266 {
267 public:
268 using allocator_type = typename memory::allocator_traits<RawAllocator>::allocator_type;
269
270 storage_policy(RawAllocator &&alloc) noexcept
271 : alloc_(std::move(alloc)) {}
272
273 allocator_type& get_allocator() noexcept
274 {
275 return alloc_;
276 }
277
278 const allocator_type& get_allocator() const noexcept
279 {
280 return alloc_;
281 }
282
283 bool is_composable() const noexcept
284 {
285 return memory::is_composable_allocator<allocator_type>::value;
286 }
287
288 private:
289 allocator_type alloc_;
290 };
291 ```
292
293 ## Segregatable
294 <a name="concept_segregatable"></a>
295
296 A `Segregatable` stores a [RawAllocator](#concept_rawallocator) and controls for which allocations it will be used.
297 It is used in [binary_segregator].
298
299 It must be nothrow movable and provide the following:
300
301 Expression|Type|Semantics
302 ----------|----|---------
303 `Segregatable::allocator_type`|some [RawAllocator](#concept_rawallocator)|The type of the allocator it controls.
304 `segregatable.get_allocator()`|`allocator_type&`|A reference to the allocator object it controls.
305 `const_segregatable.get_allocator()`|`const allocator_type&`|A `const` reference to the allocator object it controls.
306 `segregatable.use_allocate_node(size, alignment)`|`bool`|Whether or not the allocator object will be used for a node allocation with these specific properties. If it returns `true`, `allocate_node()` of the allocator object will be called with the same parameters; if it returns `false`, it will not be used.
307 `segregatable.use_allocate_array(count, size, alignment)`|`bool`|Whether or not the allocator object will be used for an array allocation with these specific properties. If it returns `true`, `allocate_array()` of the allocator object will be called with the same parameters; if it returns `false`, it will not be used.
308
309 For exposition, this is a simple `Segregatable` that will always use the given allocator:
310
311 ```cpp
312 template <class RawAllocator>
313 class segregatable
314 {
315 public:
316 using allocator_type = typename memory::allocator_traits<RawAllocator>::allocator_type;
317
318 segregatable(RawAllocator &&alloc) noexcept
319 : alloc_(std::move(alloc)) {}
320
321 allocator_type& get_allocator() noexcept
322 {
323 return alloc_;
324 }
325
326 const allocator_type& get_allocator() const noexcept
327 {
328 return alloc_;
329 }
330
331 bool use_allocate_node(std::size_t, std::size_t) noexcept
332 {
333 return true;
334 }
335
336 bool use_allocate_array(std::size_t, std::size_t) noexcept
337 {
338 return true;
339 }
340
341 private:
342 allocator_type alloc_;
343 };
344 ```
345
346 ## Tracker
347 <a name="concept_tracker"></a>
348
349 A `Tracker` tracks allocation and/or deallocation of a `RawAllocator` and is used in the class template [tracked_allocator].
350 It is a moveable class that can be used as a base class.
351 No operation on a `Tracker` may throw.
352 The address of a `Tracker` can be used as a unique, runtime identifier for a certain `RawAllocator`.
353
354 An instance `tracker` of it must provide the following functions:
355
356 Expression|Semantics
357 ----------|---------
358 `tracker.on_node_allocation(node, size, alignment)` | Gets called after a [node](#concept_node) with given properties has been allocated.
359 `tracker.on_node_deallocation(node, size, alignment)` | Gets called before a [node](#concept_node) with given properties is deallocated.
360 `tracker.on_array_allocation(array, count, size, alignment)` | Same as the [node](#concept_node) version, but for [arrays](#concept_array).
361 `tracker.on_array_deallocation(array, count, size, alignment)` | Same as the [node](#concept_node) version, but for [arrays](#concept_array).
362
363 *Note*: Those tracking functions are also called after a successful composable (de)allocation function.
364
365 A *deep tracker* also tracks the [BlockAllocator](#concept_blockallocator) of another allocator
366 and thus allows monitoring the often more expensive big allocations done by it.
367 Such a `Tracker` must provide the following additional functions:
368
369 Expression|Semantics
370 ----------|---------
371 `tracker.on_allocator_growth(memory, size)` | Gets called after the block allocator has allocated the passed memory block of given size.
372 `tracker.on_allocator_shrinkage(memory, size)` | Gets called before a given memory block of the block allocator will be deallocated.
373
374 For exposition, this is a sample `Tracker`:
375
376 ```cpp
377 struct tracker
378 {
379 void on_node_allocation(void *mem, std::size_t size, std::size_t) noexcept
380 {
381 std::clog << this << " node allocated: ";
382 std::clog << mem << " (" << size << ") " << '\n';
383 }
384
385 void on_array_allocation(void *mem, std::size_t count, std::size_t size, std::size_t) noexcept
386 {
387 std::clog << this << " array allocated: ";
388 std::clog << mem << " (" << count << " * " << size << ") " << '\n';
389 }
390
391 void on_node_deallocation(void *ptr, std::size_t, std::size_t) noexcept
392 {
393 std::clog << this << " node deallocated: " << ptr << " \n";
394 }
395
396 void on_array_deallocation(void *ptr, std::size_t, std::size_t, std::size_t) noexcept
397 {
398 std::clog << this << " array deallocated: " << ptr << " \n";
399 }
400 };
401 ```
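
A sketch of how such a `Tracker` is attached to an allocator (assuming the `make_tracked_allocator()` helper and `heap_allocator` from the library):

```cpp
#include <foonathan/memory/heap_allocator.hpp>    // heap_allocator
#include <foonathan/memory/namespace_alias.hpp>   // namespace alias memory
#include <foonathan/memory/tracked_allocator.hpp> // make_tracked_allocator

void example()
{
    // wraps the heap_allocator; the tracker hooks are called on each (de)allocation
    auto alloc = memory::make_tracked_allocator(tracker{}, memory::heap_allocator{});

    auto node = alloc.allocate_node(sizeof(int), alignof(int)); // calls on_node_allocation()
    alloc.deallocate_node(node, sizeof(int), alignof(int));     // calls on_node_deallocation()
}
```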
402
403 [allocator_storage]: \ref foonathan::memory::allocator_storage
404 [allocator_traits]: \ref foonathan::memory::allocator_traits
405 [composable_allocator_traits]: \ref foonathan::memory::composable_allocator_traits
406 [memory_arena]: \ref foonathan::memory::memory_arena
407 [memory_block]: \ref foonathan::memory::memory_block
408 [binary_segregator]: \ref foonathan::memory::binary_segregator
409 [tracked_allocator]: \ref foonathan::memory::tracked_allocator
0 # Debugging options and error handling
1
2 ## Error handling
3
4 By default, there are two exceptions thrown when something goes wrong.
5
6 The class [out_of_memory] is thrown when a low-level allocator like a [heap_allocator] runs out of memory.
7 It is derived from `std::bad_alloc`.
8
9 The class [bad_allocation_size] is thrown when a higher-level allocator like a [memory_pool] is requested to allocate a node bigger than its node size, for example.
10 Another case is an array which is simply too big. It is also derived from `std::bad_alloc`.
11
12 This error handling mechanism can be configured to work without exceptions.
13 Each exception class contains a static `handler` function which can be set to a user-defined function.
14 This function gets called in the exception constructor.
15 It takes information about the allocator such as name and address and some exception-related information, such as the size for which the allocation fails.
16
17 The handler can do anything it wants, e.g. log the error.
18 If the handler returns, the exception will be thrown.
19
20 If exceptions are disabled in the library, any `throw` statement will be translated to a call to `std::abort()`;
21 the handler functions are especially useful then, as they will still be called.
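
For example, a custom out-of-memory handler could be installed like this (a minimal sketch, assuming the static `set_handler()` function and the `allocator_info` type from `error.hpp`):

```cpp
#include <cstddef>
#include <cstdio>

#include <foonathan/memory/error.hpp>           // out_of_memory, allocator_info
#include <foonathan/memory/namespace_alias.hpp> // namespace alias memory

void install_handler()
{
    // the handler is called in the constructor of out_of_memory, before the exception is thrown
    memory::out_of_memory::set_handler([](const memory::allocator_info& info, std::size_t amount) {
        std::fprintf(stderr, "allocator '%s' failed to allocate %zu bytes\n", info.name, amount);
    });
}
```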
22
23 ## Debugging
24 <a name="debugging"></a>
25
26 There are also facilities useful for tracking memory related errors.
27
28 Memory leaks can be tracked down with the [leak_handler].
29 It will be called when an allocator is destroyed without all memory being deallocated.
30 There is a distinction between allocating memory through the [allocator_traits] and via the allocator-specific interface directly,
31 e.g. [memory_stack]'s `unwind()` function. Only (de-)allocations through the [allocator_traits] will be tracked,
32 since there the user deals with a generic allocator and cannot rely on proper deallocation, as opposed to dealing with a specific allocator,
33 which will return all memory when it is destroyed.
34 Leak checking can be completely disabled by the CMake option `FOONATHAN_MEMORY_DEBUG_LEAK_CHECK`.
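
Installing a [leak_handler] could look like this (a sketch, assuming the free function `set_leak_handler()` from `debugging.hpp` and the `allocator_info` type from `error.hpp`):

```cpp
#include <cstddef>
#include <cstdio>

#include <foonathan/memory/debugging.hpp>       // set_leak_handler
#include <foonathan/memory/error.hpp>           // allocator_info
#include <foonathan/memory/namespace_alias.hpp> // namespace alias memory

void install_leak_handler()
{
    // called when an allocator is destroyed while memory is still allocated through it
    memory::set_leak_handler([](const memory::allocator_info& info, std::ptrdiff_t amount) {
        std::fprintf(stderr, "leak in allocator '%s': %td bytes\n", info.name, amount);
    });
}
```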
35
36 Invalid pointers passed to deallocation functions, i.e. double-free or was-never-allocated, can be tracked via the [invalid_pointer_handler].
37 While checking for was-never-allocated is relatively cheap, checking for double-free can be a lot more expensive, especially for pool allocators,
38 where the whole pool has to be checked to see whether the pointer is already in the pool
39 (the pool is kept sorted in that case to reduce the time, but deallocation is then still not constant time, unlike without the check).
40 So both checks can be disabled separately, by `FOONATHAN_MEMORY_DEBUG_POINTER_CHECK` and `FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK` respectively.
41
42 The CMake option `FOONATHAN_MEMORY_DEBUG_FILL` controls pre-filling the memory.
43 This is useful for tracking errors caused by missing initialization after allocation or by use-after-free.
44 The values can be read from the enum [debug_magic].
45
46 For catching overflow errors, the CMake option `FOONATHAN_MEMORY_DEBUG_FENCE_SIZE` can be set to an integral value.
47 It leads to additional fence memory being allocated before or after the memory.
48 Note that due to alignment it may not be exactly that size.
49 On allocation the fence memory will be checked to see whether it is still filled with the fence value;
50 if not, the [buffer_overflow_handler] will be called.
51
52 Other internal assertions in the allocator code that test for bugs in the library can be controlled via the CMake option `FOONATHAN_MEMORY_DEBUG_ASSERT`.
53
54 [out_of_memory]: \ref foonathan::memory::out_of_memory
55 [bad_allocation_size]: \ref foonathan::memory::bad_allocation_size
56 [heap_allocator]: \ref foonathan::memory::heap_allocator
57 [memory_pool]: \ref foonathan::memory::memory_pool
58 [leak_handler]: \ref foonathan::memory::leak_handler
59 [buffer_overflow_handler]: \ref foonathan::memory::buffer_overflow_handler
60 [invalid_pointer_handler]: \ref foonathan::memory::invalid_pointer_handler
61 [allocator_traits]: \ref foonathan::memory::allocator_traits
62 [memory_stack]: \ref foonathan::memory::memory_stack
63 [debug_magic]: \ref foonathan::memory::debug_magic
0 # Using RawAllocators in STL containers, smart pointers, etc.
1
2 ## 1. Using Containers with RawAllocators
3
4 The following listing shows how to use a [memory_pool] with a `std::list` container:
5
6 ~~~{.cpp}
7 #include <memory/container.hpp> // for list, list_node_size
8 #include <memory/memory_pool.hpp> // for memory_pool
9
10 int main()
11 {
12 memory::memory_pool<> pool(memory::list_node_size<int>::value, 1024);
13 memory::list<int, decltype(pool)> list(pool);
14 // use list as normal
15 }
16 ~~~
17
18 The first line in `main()` declares a [memory_pool].
19 A memory pool is a special kind of allocator that takes a big memory block and separates it into many smaller
20 [nodes] of a given size. Free nodes are put onto a list and can be retrieved in constant time.
21 This structures allows (de-)allocations in any order, but only for a fixed size.
22 But this is exactly the memory footprint for a node based container, like `std::list`:
23 Each element has the same size and they can be created and destroyed at any time.
24
25 [memory_pool] is templated, but the default parameters are just fine for most use cases, so they are used.
26 Its constructor takes two parameters: The first one is the fixed size of each node.
27 The pool will be used to allocate the nodes for a list of `int`, but `sizeof(int)` isn't enough,
28 since each node also stores the two pointers to the next and previous node in the list.
29 To avoid guessing the node size, which also varies between STL implementations,
30 the sizes are obtained automatically when building the library and stored in integral constants of the form `<container>_node_size<T>`.
31 In this case it is a list of `int` and thus `list_node_size<int>::value` is the node size we need.
32 The second parameter is simply the size of the big block that will be separated.
33 All allocators that work on bigger memory blocks can grow if their initial capacity is exhausted,
34 but it is better to use a big size at the beginning.
35
36 The second line then actually declares the list.
37 Since [RawAllocator] provides a conceptually very different interface than `Allocator`,
38 they cannot be used directly, but need to be wrapped in the class [std_allocator].
39 It takes a `RawAllocator` and provides the interface of an `Allocator` forwarding to the underlying raw allocator
40 and taking care of rebinding, container copying and threading.
41 The raw allocator itself is only stored as reference, not directly embedded.
42 This is required by the `Allocator` model which wants to copy allocators, but `RawAllocator` objects are only moveable.
43 In addition, the `get_allocator()` function of containers only returns a copy of the allocator,
44 so direct access to the stored allocator isn't possible.
45 By storing a reference (actually a pointer) inside the `std_allocator`, copying is enabled
46 and the raw allocator can be accessed either directly or via getter function of the `std_allocator` object.
47
48 For simplicity, template aliases are provided in `container.hpp` that do the wrapping.
49 The above `memory::list<...>` is equivalent to `std::list<int, memory::std_allocator<int, decltype(pool)>>`.
50 Due to the nature of the `Allocator` model, the `value_type` has to be repeated twice,
51 and the `Allocator` is the last template parameter, leading to a very verbose
52 `std::unordered_map<Key, Value, std::hash<Key>, std::equal_to<Key>, memory::std_allocator<std::pair<const Key, Value>, RawAllocator>>`
53 as opposed to `memory::unordered_map<Key, Value, RawAllocator>`.
54 But of course the verbose form can be used as well; in that case `std_allocator.hpp` has to be included to get `std_allocator`.
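
To make the equivalence concrete, the following two declarations create the exact same type (a sketch reusing the `pool` from the listing above):

~~~{.cpp}
#include <list>

#include <memory/container.hpp>     // for memory::list
#include <memory/memory_pool.hpp>   // for memory::memory_pool
#include <memory/std_allocator.hpp> // for memory::std_allocator

void example(memory::memory_pool<>& pool)
{
    // the alias form:
    memory::list<int, memory::memory_pool<>> a(pool);
    // the equivalent verbose form:
    std::list<int, memory::std_allocator<int, memory::memory_pool<>>> b(pool);
}
~~~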
55
56 Since the type of `list` is a normal `std::list`, just with a special allocator,
57 it provides all the normal functions. The constructor used is the one taking the allocator object;
58 an automatic conversion to `std_allocator` allows passing the pool directly.
59 Then the object can be used as normal, passed to algorithms and freely copied, moved or swapped.
60 `std_allocator` ensures that each copy uses the same [memory_pool] as its allocator.
61
62 The same procedure - create a [RawAllocator], wrap it inside a [std_allocator] and pass it to a container,
63 optionally the last two steps combined - can be used for all other STL containers, `basic_string` or any class taking an `Allocator` object.
64 Node size values are provided for all node based STL containers (lists, sets and maps).
65
66 ## 2. RawAllocators as Deleter classes
67
68 But not all STL classes require the full `Allocator`; some only need a `Deleter`.
69 A `Deleter` is just a function object that can be called with a pointer and should free it.
70 Like [std_allocator] is a wrapper to provide the `Allocator` interface,
71 there are two kinds of deleter wrappers defined in `deleter.hpp`:
72 [allocator_deallocator] and [allocator_deleter].
73 The former just deallocates the memory without calling destructors; the latter does call destructors.
74 They also store a reference instead of the actual allocator for the same reason as in `std_allocator`
75 and take care of synchronization.
76 And like the container typedefs, there is an easier way to handle the most common use case of deleters: smart pointers.
77
78 The following excerpt shows the handling of smart pointers:
79
80 ~~~{.cpp}
81 #include <memory/smart_ptr.hpp> // for allocate_XXX
82 ...
83 // assume we have a RawAllocator alloc
84 auto unique_ptr = memory::allocate_unique<int>(alloc, 5); // (1)
85 auto array_unique_ptr = memory::allocate_unique<int[]>(alloc, 10u); // (2)
86 auto shared_ptr = memory::allocate_shared<int>(alloc, 7); // (3)
87 ~~~
88
89 At (1) a `std::unique_ptr` is created storing a dynamically allocated `int` of value `5` via a `RawAllocator` `alloc`.
90 It is another great use case for C++11's `auto`;
91 the actual type would be `std::unique_ptr<int, memory::allocator_deleter<int, RawAllocator>>`.
92 The deleter and function also work with arrays, of course, as (2) shows:
93 It creates an array of `10` value-initialized integers.
94 A similar function is provided for `std::shared_ptr` used in (3).
95 It uses the `std::allocate_shared` function internally and thus guarantees the efficient single-allocation implementation.
96 Like in the standard library, there is no array version for shared pointers.
97 And since the results are instantiations of the actual standard library pointers,
98 they can be used as usual.
99 Especially `std::shared_ptr` can be very easily integrated,
100 since the actual allocator or deleter is type erased.
101
102 ## 3. Temporary allocations
103
104 The third big use case for allocators besides containers or single objects
105 are temporary allocations.
106 Sometimes an algorithm needs a temporary buffer to store some results.
107 Variable length arrays - although currently not part of the C++ standard - are a common solution.
108 There are either compiler extensions allowing normal variables to be used as array size directly
109 or the more low-level approach via `alloca()`.
110 `alloca()` allocates memory by simply adjusting the top pointer of the stack.
111 The resulting memory is thus available directly on the stack and will be automatically freed on function exit.
112 The allocation is also much faster than a direct heap allocation.
113
114 But although `alloca()` is available on many platforms, it is not portable.
115 In addition, out of memory cannot be reported, since it leads to a stack overflow
116 and nothing can be done then.
117 Thus it is not recommended to use it.
118 Instead, use the [temporary_allocator] class available in `temporary_allocator.hpp`.
119 It does not use the real program stack for the allocation,
120 but its own, separate stack for each thread obtained from the heap.
121
122 Below is a simple implementation of a merge sort that uses a temporary buffer:
123
124 ~~~{.cpp}
125 #include <algorithm>
126 #include <iterator>
127
128 #include <memory/container.hpp> // for memory::vector
129 #include <memory/temporary_allocator.hpp> // for memory::temporary_allocator
130
131 template <typename RAIter>
132 void merge_sort(RAIter begin, RAIter end)
133 {
134 using value_type = typename std::iterator_traits<RAIter>::value_type;
135
136 if (end - begin <= 1)
137 return;
138 auto mid = begin + (end - begin) / 2;
139
140 memory::temporary_allocator alloc; // (1)
141 memory::vector<value_type, memory::temporary_allocator> first(begin, mid, alloc),
142 second(mid, end, alloc); // (2)
143
144 merge_sort(first.begin(), first.end());
145 merge_sort(second.begin(), second.end());
146 std::merge(first.begin(), first.end(), second.begin(), second.end(), begin);
147 }
148 ~~~
149
150 The usage of [temporary_allocator] is straightforward:
151 At (1), the allocator is created.
152 Then it can be used to create the vectors as usual in (2).
153
154 Behind the scenes, a little bit more work is done.
155 As mentioned the allocator uses its own internal memory stack, one per thread.
156 By default a lot of magic ensures that there is a stack object created when needed and destroyed on thread exit.
157 This internal stack is the [temporary_stack] and you can access it for the current thread through the `get_temporary_stack()` function, which is also called by the default constructor of the [temporary_allocator].
158 If the stack hasn't been created for the current thread yet, calling this function will create it.
159 Once a stack is created it will also be destroyed on thread exit.
160
161 > This isn't quite true.
162 > On some platforms it might only be destroyed on full program exit,
163 > if the destruction on thread exit can't be registered.
164
165 You can also use explicit lifetime control of the stack through the `temporary_stack_initializer` class.
166 Its constructor will create the stack and the destructor will destroy it.
167 This gives you more control than the "magic" done to ensure the destruction.
168
169 Because the per-thread stack management has a little overhead,
170 you can control it with the `FOONATHAN_MEMORY_TEMPORARY_STACK_MODE` variable.
171 If `2`, the behavior is as described here, with the fully automated management.
172 If `1`, you have to use the `temporary_stack_initializer` to ensure the destructor call,
173 because the automated management is disabled.
174 And if `0`, there is no per-thread stack at all; calling `get_temporary_stack()` is not allowed;
175 you have to create one yourself and pass it to the constructor of [temporary_allocator].
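
For exposition, explicit lifetime control could look like this (a sketch, assuming `temporary_stack_initializer` is default constructible):

~~~{.cpp}
#include <memory/temporary_allocator.hpp> // for temporary_allocator, temporary_stack_initializer

int main()
{
    // eagerly creates the per-thread stack;
    // its destructor destroys the stack at the end of main()
    memory::temporary_stack_initializer stack;

    memory::temporary_allocator alloc; // uses the stack created above
    // ...
}
~~~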
176
177 The allocator itself now saves the current top of the stack in its constructor.
178 Allocation will simply move the stack pointer and is thus extremely fast.
179 Deallocations are a no-op, because the destructor of the allocator will unwind to the saved position.
180 You cannot move a temporary allocator; it is thus not really a [RawAllocator].
181 Because of the automatic unwinding in the destructor, you must not allocate from an allocator that isn't the most recently created one.
182 If the internal stack is exhausted, it can grow, although this may lead to a slow heap allocation;
183 growth can thus be controlled by a growth handler.
184
185 [allocator_deallocator]: \ref foonathan::memory::allocator_deallocator
186 [allocator_deleter]: \ref foonathan::memory::allocator_deleter
187 [memory_pool]: \ref foonathan::memory::memory_pool
188 [std_allocator]: \ref foonathan::memory::std_allocator
189 [temporary_allocator]: \ref foonathan::memory::temporary_allocator
190 [temporary_stack]: \ref foonathan::memory::temporary_stack
191 [nodes]: md_doc_concepts.html#concept_node
192 [RawAllocator]: md_doc_concepts.html#concept_rawallocator
0 <!-- HTML footer for doxygen 1.8.16-->
1 <!-- start footer part -->
2 <!--BEGIN GENERATE_TREEVIEW-->
3 <div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
4 <ul>
5 $navpath
6 <li class="footer">$generatedby
7 <a href="http://www.doxygen.org/index.html">
8 <img class="footer" src="$relpath^doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
9 </ul>
10 </div>
11 <!--END GENERATE_TREEVIEW-->
12 <!--BEGIN !GENERATE_TREEVIEW-->
13 <hr class="footer"/><address class="footer"><small>
14 $generatedby &#160;<a href="http://www.doxygen.org/index.html">
15 <img class="footer" src="$relpath^doxygen.png" alt="doxygen"/>
16 </a> $doxygenversion
17 </small></address>
18 <!--END !GENERATE_TREEVIEW-->
19 </body>
20 </html>
0 <!-- HTML header for doxygen 1.8.16-->
1 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
2 <html xmlns="http://www.w3.org/1999/xhtml">
3 <head>
4 <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
5 <meta http-equiv="X-UA-Compatible" content="IE=9"/>
6 <meta name="generator" content="Doxygen $doxygenversion"/>
7 <meta name="viewport" content="width=device-width, initial-scale=1"/>
8 <!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
9 <!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
10 <link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
11 <script type="text/javascript" src="$relpath^jquery.js"></script>
12 <script type="text/javascript" src="$relpath^dynsections.js"></script>
13 $treeview
14 $search
15 $mathjax
16 <link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
17 $extrastylesheet
18 </head>
19 <body>
20 <div id="top"><!-- do not remove this div, it is closed by doxygen! -->
21
22 <!--BEGIN TITLEAREA-->
23 <div id="titlearea">
24 <table cellspacing="0" cellpadding="0">
25 <tbody>
26 <tr style="height: 56px;">
27 <!--BEGIN PROJECT_LOGO-->
28 <td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
29 <!--END PROJECT_LOGO-->
30 <!--BEGIN PROJECT_NAME-->
31 <td id="projectalign" style="padding-left: 0.5em;">
32 <div id="projectname">$projectname
33 <!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
34 </div>
35 <!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
36 </td>
37 <!--END PROJECT_NAME-->
38 <!--BEGIN !PROJECT_NAME-->
39 <!--BEGIN PROJECT_BRIEF-->
40 <td style="padding-left: 0.5em;">
41 <div id="projectbrief">$projectbrief</div>
42 </td>
43 <!--END PROJECT_BRIEF-->
44 <!--END !PROJECT_NAME-->
45 <!--BEGIN DISABLE_INDEX-->
46 <!--BEGIN SEARCHENGINE-->
47 <td>$searchbox</td>
48 <!--END SEARCHENGINE-->
49 <!--END DISABLE_INDEX-->
50 </tr>
51 </tbody>
52 </table>
53 </div>
54 <!--END TITLEAREA-->
55
56 <div id="main-nav">
57 <ul class="sm sm-dox" id="main-menu">
58 <li><a href="index.html">Main Page</a></li>
59 <li><a href="md_doc_tutorial.html">Tutorial</a></li>
60 <li><a href="namespacefoonathan_1_1memory.html">Index</a></li>
61 <li><a href="files.html">Files</a></li>
62 </ul>
63 </div>
64
65 <!-- end header part -->
0 This is the documentation of `foonathan/memory`.
1
2 For a quick start, read the [Tutorial] or skim the examples at the [Github page].
3 The concepts of this library are defined [here](md_doc_concepts.html).
4
5 ## Features
6
7 New allocator concepts:
8
9 * a `RawAllocator` that is similar to an `Allocator` but easier to use and write
10 * a `BlockAllocator` that is an allocator for huge memory blocks
11
12 Several implementations:
13
14 * `heap_/malloc_/new_allocator`
15 * virtual memory allocators
16 * allocator using a static memory block located on the stack
17 * memory stack
18 * different memory pools
19 * a portable, improved `alloca()` in the form of `temporary_allocator`
20
21 Adapters, wrappers and storage classes:
22
23 * incredibly powerful `allocator_traits` allowing `Allocator`s as `RawAllocator`s
24 * `std_allocator` to make a `RawAllocator` an `Allocator` again
25 * adapters for the memory resource TS
26 * `allocator_deleter` classes for smart pointers
27 * (optionally type-erased) `allocator_reference` and other storage classes
28 * memory tracking wrapper
29
30 In addition:
31
32 * container node size debuggers that obtain information about the node size of an STL container at compile-time to specify node sizes for pools
33 * debugging options for leak checking, double-free checks or buffer overflows
34 * customizable error handling routines that can work with exceptions disabled
35 * everything except the STL adapters works on a freestanding environment
36
37 ## Basic example
38
39 ```cpp
40 #include <algorithm>
41 #include <iostream>
42 #include <iterator>
43
44 #include <foonathan/memory/container.hpp> // vector, list, list_node_size,...
45 #include <foonathan/memory/memory_pool.hpp> // memory_pool
46 #include <foonathan/memory/smart_ptr.hpp> // allocate_unique
47 #include <foonathan/memory/static_allocator.hpp> // static_allocator_storage, static_block_allocator
48 #include <foonathan/memory/temporary_allocator.hpp> // temporary_allocator
49
50 // alias namespace foonathan::memory as memory for easier access
51 #include <foonathan/memory/namespace_alias.hpp>
52
53 template <typename BiIter>
54 void merge_sort(BiIter begin, BiIter end);
55
56 int main()
57 {
58 using namespace memory::literals;
59
60 // a memory pool RawAllocator
61 // allocates a memory block - initially 4KiB - and splits it into chunks of list_node_size<int>::value big
62 // list_node_size<int>::value is the size of each node of a std::list
63 memory::memory_pool<> pool(memory::list_node_size<int>::value, 4_KiB);
64
65 // just an alias for std::list<int, memory::std_allocator<int, memory::memory_pool<>>>
66 // a std::list using a memory_pool
67 // std_allocator stores a reference to a RawAllocator and provides the Allocator interface
68 memory::list<int, memory::memory_pool<>> list(pool);
69 list.push_back(3);
70 list.push_back(2);
71 list.push_back(1);
72
73 for (auto e : list)
74 std::cout << e << ' ';
75 std::cout << '\n';
76
77 merge_sort(list.begin(), list.end());
78
79 for (auto e : list)
80 std::cout << e << ' ';
81 std::cout << '\n';
82
83 // allocate a std::unique_ptr using the pool
84 // memory::allocate_shared is also available
85 auto ptr = memory::allocate_unique<int>(pool, *list.begin());
86 std::cout << *ptr << '\n';
87
88 // static storage of size 4KiB
89 memory::static_allocator_storage<4_KiB> storage;
90
91 // a memory pool again but this time with a BlockAllocator
92 // this controls the internal allocations of the pool itself
93 // we need to specify the first template parameter giving the type of the pool as well
94 // (node_pool is the default)
95 // we use a static_block_allocator that uses the static storage above
96 // all allocations will use a memory block on the stack
97 using static_pool_t = memory::memory_pool<memory::node_pool, memory::static_block_allocator>;
98 static_pool_t static_pool(memory::unordered_set_node_size<int>::value, 4_KiB, storage);
99
100 // again, just an alias for std::unordered_set<int, std::hash<int>, std::equal_to<int>, memory::std_allocator<int, static_pool_t>>
101 // see why I wrote these? :D
102 // now we have a hash set that lives on the stack!
103 memory::unordered_set<int, static_pool_t>
104 set(13, std::hash<int>{}, std::equal_to<int>{},
105 static_pool); // GCC 4.7 is missing the allocator-only ctor, breaks travis :(
106
107 set.insert(3);
108 set.insert(2);
109 set.insert(3); // running out of stack memory is properly handled, of course
110
111 for (auto e : set)
112 std::cout << e << ' ';
113 std::cout << '\n';
114 }
115
116 // naive implementation of merge_sort using temporary memory allocator
117 template <typename BiIter>
118 void merge_sort(BiIter begin, BiIter end)
119 {
120 using value_type = typename std::iterator_traits<BiIter>::value_type;
121
122 auto distance = std::distance(begin, end);
123 if (distance <= 1)
124 return;
125
126 auto mid = begin;
127 std::advance(mid, distance / 2);
128
129 // an allocator for temporary memory
130 // is similar to alloca() but uses its own stack
131 // this stack is thread_local and created the first time it's needed
132 // as soon as the allocator object goes out of scope everything allocated through it will be freed
133 memory::temporary_allocator alloc;
134
135 // alias for std::vector<value_type, memory::std_allocator<value_type, memory::temporary_allocator>>
136 // a std::vector using a temporary_allocator
137 memory::vector<value_type, memory::temporary_allocator> first(begin, mid, alloc),
138 second(mid, end, alloc);
139
140 merge_sort(first.begin(), first.end());
141 merge_sort(second.begin(), second.end());
142 std::merge(first.begin(), first.end(), second.begin(), second.end(), begin);
143 }
144 ```
145
146 See `example/` for more.
147
148 ## Installation
149
150 This library can be used as [CMake] subdirectory.
151 It is tested on GCC 4.7-4.9, Clang 3.4-3.5 and Visual Studio 2013. Newer versions should work too.
152
153 1. Fetch it, e.g. using [git submodules] `git submodule add https://github.com/foonathan/memory ext/memory` and `git submodule update --init --recursive`.
154
155 2. Call `add_subdirectory(ext/memory)` or whatever your local path is to make it available in CMake.
156
157 3. Simply call `target_link_libraries(your_target PUBLIC foonathan_memory)` to link this library and set up the include search path.
158
159 4. You need to activate C++11 for your target. If not already done, you can use [foonathan/compatibility], already available through `add_subdirectory()`, and call `comp_target_features(your_target PUBLIC CPP11)`.
160
161 *Note: If during CMake you see an error message that compatibility is
162 not on the newest version, run `git submodule update
163 --recursive --remote` to force the compatibility submodule of memory to
164 update to the latest version.*
165
166 You can also install the library:
167
168 1. Run `cmake -DCMAKE_BUILD_TYPE="buildtype" -DFOONATHAN_MEMORY_BUILD_EXAMPLES=OFF -DFOONATHAN_MEMORY_BUILD_TESTS=OFF .` inside the library sources.
169
170 2. Run `cmake --build . -- install` to install the library under `${CMAKE_INSTALL_PREFIX}`.
171
172 3. Repeat 1 and 2 for each build type/configuration you want to have (like `Debug`, `RelWithDebInfo` and `Release` or custom names).
173
174 To use an installed library:
175
176 4. Call `find_package(foonathan_memory major.minor REQUIRED)` to find the library.
177
178 5. Call `target_link_libraries(your_target PUBLIC foonathan_memory)` and activate C++11 to link to the library.
179
180 See http://foonathan.github.io/doc/memory/md_doc_installation.html for a detailed guide.
181
182 ## About this documentation
183
184 This documentation is written in a similar way as the C++ standard itself, although not that formal.
185
186 Concepts are documented using the names of the template parameters, for example the following class:
187
188 ~~~{.cpp}
189 template <class Tracker, class RawAllocator>
190 class tracked_allocator;
191 ~~~
192
193 It takes two template parameters, the first must model the [Tracker] concept, the second the [RawAllocator] concept.
194
195 Unless explicitly stated otherwise, it is not allowed to call a function that modifies state from two different threads.
196 Functions that modify state are non-`const` member functions, functions taking a non-`const` reference to objects
197 or functions where it is explicitly documented that they change some hidden state.
198
199 If a function is documented as `noexcept`, it does not throw anything.
200 Otherwise it has a *Throws:* clause specifying what it throws, or if it is a forwarding function, the information can be found there (see below).
201
202 If a class is described as [RawAllocator] it automatically has certain semantic information which is not explicitly mentioned.
203 This is especially true for the member functions of an [allocator_traits] specialization.
204
205 If a function is described as returning the value of another function or forwarding to it,
206 it implicitly has the requirements and effects from the called function and can also throw the same things.
207
208 [Tutorial]: md_doc_tutorial.html
209 [Github page]: https://github.com/foonathan/memory/
210 [Tracker]: md_doc_concepts.html#concept_tracker
211 [RawAllocator]: md_doc_concepts.html#concept_rawallocator
212 [allocator_traits]: \ref foonathan::memory::allocator_traits
0 # Installation
1
2 This library can either be used via [CMake]'s `add_subdirectory()` command or installed globally.
3
4 ## Requirements
5
6 * git
7 * CMake version 3.1 or higher
8 * GCC 4.9 or higher, Clang 3.5 or higher, or Visual Studio 14 or higher
9
10 ## As subdirectory of your project
11
12 ### 1. Fetching
13
14 It is recommended to set up a [git submodule] inside your project.
15 Simply run:
16
17 1. `git submodule add https://github.com/foonathan/memory ext/memory`. This will clone the latest commit into a local directory `ext/memory` and register it as a submodule.
18 2. `git submodule update --init --recursive`. This will fetch the latest commits from all submodules `memory` itself is using.
19
20 If you later want to update your local copy to the latest version, simply run: `git submodule update --recursive --remote`.
21
22 ### 2. CMake Setup
23
24 I am assuming that there is a local copy of the library source files under the path `ext/memory`.
25
26 In your `CMakeLists.txt` place a call to `add_subdirectory(ext/memory)`.
27 This will make all targets of `memory` available inside your CMakeLists.txt.
28 If you want to configure the library, add `set(option value CACHE INTERNAL "" FORCE)` before the `add_subdirectory()` command.
29 See [options] for a list of all configuration options and CMake variables.
30
31 Now the targets are available; to use the library in your application, call `target_link_libraries(target foonathan_memory)`.
32 This will also set up the include search path of the compiler, as well as other required flags.
33
34 ### 3. Code Usage
35
36 In your code you simply need to include the appropriate headers and you are good to go.
37 Everything is under the subdirectory `foonathan/memory` so write `#include <foonathan/memory/heap_allocator.hpp>` to use the [heap_allocator].
38
39 ## Installing the library
40
41 Download or clone the source for the library version you want to install.
42 You can build the library inside the source directory; it will not be needed after the installation.
43
44 For each build type, run `cmake -DCMAKE_BUILD_TYPE="buildtype" -DFOONATHAN_MEMORY_BUILD_EXAMPLES=OFF -DFOONATHAN_MEMORY_BUILD_TESTS=OFF .` with possible other [options] to configure, then simply `cmake --build . -- install` to build and install the library.
45 This will install the header files under `${CMAKE_INSTALL_PREFIX}/include/foonathan_memory-${major}.${minor}`, the tools under `${CMAKE_INSTALL_PREFIX}/bin` and the build artifacts under `${CMAKE_INSTALL_PREFIX}/lib/foonathan_memory-${major}.${minor}`.
46 By default, `${CMAKE_INSTALL_PREFIX}` is `/usr/local` under UNIX and `C:/Program Files` under Windows,
47 installation may require `sudo`/administrative privileges.
48
49 It is recommended that you install the library for the `Debug`, `RelWithDebInfo` and `Release` build types.
50 Each build type allows different CMake configurations and compiler flags; you can also create your own.
51
52 ## Using an installed library (CMake)
53
54 After you've installed the library, all you need to call is `find_package(foonathan_memory major.minor REQUIRED)` to find it.
55 This will look for a library installation of a compatible version and the same build type as your project,
56 i.e. if you compile under build type `Debug`, it will also match the `Debug` library.
57 This *should* work without your help, otherwise it will tell you what to do.
58
59 A `0.x` version requires an exact match in the call to `find_package()`; otherwise a library with the same major version and a higher minor version is also compatible.
60 If you want only exact version matches add the `EXACT` flag to `find_package()`.
61
62 If a matching library version/configuration cannot be found, this is an error due to `REQUIRED`.
63 If this is not what you want, leave it out and do conditionals based on the CMake variable `foonathan_memory_FOUND`.
64 In the source code, all targets linking to the library have the macro `FOONATHAN_MEMORY` defined automatically,
65 as well as `FOONATHAN_MEMORY_VERSION_MAJOR/MINOR`.
66 Use conditional compilation with them.
67
68 After that you can link to the library by calling `target_link_libraries(your_target PUBLIC foonathan_memory)`.
69 This sets up everything needed.
70
71 Then simply include the headers, everything is under the subdirectory `foonathan/memory` so write `#include <foonathan/memory/heap_allocator.hpp>` to use the [heap_allocator].
72
73 ## Using an installed library (other buildsystems)
74
75 To use the library with other build-systems, add `${CMAKE_INSTALL_PREFIX}/include/foonathan_memory-${major}.${minor}` and `${CMAKE_INSTALL_PREFIX}/lib/foonathan_memory-${major}.${minor}/${CMAKE_BUILD_TYPE}` to your include directories path.
76 Link to the library file in `${CMAKE_INSTALL_PREFIX}/lib/foonathan_memory-${major}.${minor}/${CMAKE_BUILD_TYPE}` and enable the right C++ standard for your configuration.
77
78 You should also globally define the `FOONATHAN_MEMORY` macro as `1` and the `FOONATHAN_MEMORY_VERSION_MAJOR/MINOR` macros as the corresponding values.
79
80 [CMake]: https://cmake.org
81 [git submodule]: http://git-scm.com/docs/git-submodule
82 [compatibility library]: https://github.com/foonathan/compatibility
83 [heap_allocator]: \ref foonathan::memory::heap_allocator
84 [options]: md_doc_options.html
0 # Writing classes using a RawAllocator
1
2 Compared to the requirements an `AllocatorAwareContainer` has to fulfill,
3 it is very easy to use a `RawAllocator` in a container.
4 There is no need to worry about comparing allocators, `select_on_container_copy_construction()`,
5 `propagate_on_container_move_assignment` or the undefined behavior that sometimes happens if you `swap()` a container.
6
7 ## The Allocator version
8
9 To demonstrate this, consider a simple `deep_copy_ptr`. `deep_copy_ptr` is like `std::unique_ptr` but provides a copy constructor
10 which will perform a copy of the object.
11 Unlike `std::unique_ptr` it will take a full-blown `Allocator`. Then it will be transformed to use a [RawAllocator].
12 It is only meant to demonstrate the use of allocator classes, not to be a real-world smart pointer class
13 (it is pretty dumb: it copies the pointee on copy but invalidates on move...).
14 So, this is it:
15
16 ```cpp
17 template <typename T, class Allocator = std::allocator<T>>
18 class deep_copy_ptr
19 : Allocator
20 {
21 using traits = std::allocator_traits<Allocator>;
22 public:
23 using value_type = typename traits::value_type;
24 using allocator_type = Allocator;
25
26 explicit deep_copy_ptr(const allocator_type &alloc = allocator_type{})
27 : allocator_type(alloc), ptr_(nullptr) {}
28
29 deep_copy_ptr(value_type value, const allocator_type &alloc = allocator_type{})
30 : allocator_type(alloc), ptr_(create(*this, std::move(value))) {}
31
32 deep_copy_ptr(const deep_copy_ptr &other)
33 : allocator_type(traits::select_on_container_copy_construction(other)),
34 ptr_(create(*this, *other))
35 {}
36
37 deep_copy_ptr(deep_copy_ptr &&other) noexcept
38 : allocator_type(std::move(other)),
39 ptr_(other.ptr_)
40 {
41 other.ptr_ = nullptr;
42 }
43
44 ~deep_copy_ptr() noexcept
45 {
46 destroy();
47 }
48
49 deep_copy_ptr& operator=(const deep_copy_ptr &other)
50 {
51 if (traits::propagate_on_container_copy_assignment::value && static_cast<Allocator&>(*this) != other)
52 {
53 allocator_type alloc(other);
54 auto ptr = create(alloc, *other);
55 destroy();
56
57 Allocator::operator=(std::move(alloc));
58 ptr_ = ptr;
59 }
60 else
61 {
62 auto ptr = create(*this, *other);
63 destroy();
64 ptr_ = ptr;
65 }
66 return *this;
67 }
68
69 deep_copy_ptr& operator=(deep_copy_ptr &&other) noexcept(traits::propagate_on_container_move_assignment::value)
70 {
71 if (traits::propagate_on_container_move_assignment::value && static_cast<allocator_type&>(*this) != other)
72 {
73 allocator_type::operator=(std::move(other));
74 ptr_ = other.ptr_;
75 other.ptr_ = nullptr;
76 }
77 else if (static_cast<allocator_type&>(*this) == other)
78 {
79 ptr_ = other.ptr_;
80 other.ptr_ = nullptr;
81 }
82 else
83 {
84 auto ptr = create(*this, std::move(*other));
85 destroy();
86 ptr_ = ptr;
87 }
88 return *this;
89 }
90
91 friend void swap(deep_copy_ptr &a, deep_copy_ptr &b) noexcept
92 {
93 using std::swap;
94 if (traits::propagate_on_container_swap::value)
95 swap(static_cast<allocator_type&>(a), static_cast<allocator_type&>(b));
96 else
97 assert(static_cast<allocator_type&>(a) == b);
98 swap(a.ptr_, b.ptr_);
99 }
100
101 explicit operator bool() const
102 {
103 return !!ptr_;
104 }
105
106 T& operator*()
107 {
108 return *ptr_;
109 }
110
111 const T& operator*() const
112 {
113 return *ptr_;
114 }
115
116 typename traits::pointer operator->()
117 {
118 return ptr_;
119 }
120
121 typename traits::const_pointer operator->() const
122 {
123 return ptr_;
124 }
125
126 private:
127 template <typename ... Args>
128 typename traits::pointer create(allocator_type &alloc, Args&&... args)
129 {
130 auto ptr = traits::allocate(alloc, 1);
131 try
132 {
133 traits::construct(alloc, ptr, std::forward<Args>(args)...);
134 }
135 catch (...)
136 {
137 traits::deallocate(alloc, ptr, 1);
138 throw;
139 }
140 return ptr;
141 }
142
143 void destroy() noexcept
144 {
145 if (ptr_)
146 {
147 traits::destroy(*this, ptr_);
148 traits::deallocate(*this, ptr_, 1);
149 }
150 }
151
152 typename traits::pointer ptr_;
153 };
154 ```
155
156 I am not going to go into much detail about this code, since it is just to demonstrate the complexity involved with the `Allocator` model.
157 To note is the following:
158
159 * The `Allocator` is inherited privately to use the empty base optimization if it is an empty type.
160 Also the allocator is *owned* by the pointer.
161 * All access to the `Allocator` is done through the `std::allocator_traits` class.
162 In addition, the actual `value_type` and pointer must be obtained from the traits class and its appropriate functions called to construct/destroy the object.
163 * The copy constructor must call `traits::select_on_container_copy_construction()`, the move constructor can just move the allocator.
164 * Copy and move assignment and `swap()` only exchange the allocator if the appropriate `traits::propagate_on_container_XXX` is `true`.
165 This involves a lot of complexity since if it is `false` - which is the default! - the old memory has to be deallocated on the old allocator
166 and the new memory allocated on the new allocator if the allocators aren't *equal* - even for move!
167 Also note the `assert()` in `swap()`: Since `swap()` must not throw, it cannot do the reallocation if the propagation is `false`.
168
169 ## The RawAllocator version
170
171 This is now a step-by-step review of the changes in the version that uses a [RawAllocator].
172
173 ```cpp
174 template <typename T, class RawAllocator = memory::default_allocator>
175 class deep_copy_ptr
176 : memory::allocator_reference<RawAllocator>
177 ```
178 The default allocator is now [default_allocator]. Its actual type can be changed when building this library,
179 but it is similar to `std::allocator`.
180 Also the allocator is stored in an [allocator_reference].
181 This is recommended for three reasons:
182
183 a) Usage requirement: `RawAllocator` classes are only required to be moveable. [allocator_reference] is copyable; this allows copying the `deep_copy_ptr`.
184
185 b) Simplicity: [allocator_reference] provides the full interface without using the [allocator_traits] class.
186 It has already done the wrapping for you.
187
188 c) Ownership: The `deep_copy_ptr` doesn't *own* the allocator, it can be shared with other classes or objects.
189 This is a useful semantic change which is often required anyway.
190 *Note: The passed allocator object must now live as long as the container object, except for stateless allocators!*
191
192 The reference is inherited too for the same reason:
193 It is empty for stateless allocators. They are constructed on-the-fly.
194 This also means that they can be passed in as a temporary.
195 For stateful allocators it stores a pointer. The user has to ensure that the referenced allocator object then outlives the `deep_copy_ptr` object.
196
197 ```cpp
198 using allocator_ref = memory::allocator_reference<RawAllocator>;
199 public:
200 using value_type = T;
201 using allocator_type = typename allocator_ref::allocator_type;
202
203 explicit deep_copy_ptr(allocator_ref alloc = allocator_type{})
204 : allocator_ref(alloc), ptr_(nullptr) {}
205
206 deep_copy_ptr(value_type value, allocator_ref alloc = allocator_type{})
207 : allocator_ref(alloc), ptr_(create(std::move(value))) {}
208
209 deep_copy_ptr(const deep_copy_ptr &other)
210 : allocator_ref(other),
211 ptr_(create(*other))
212 {}
213
214 deep_copy_ptr(deep_copy_ptr &&other) noexcept
215 : allocator_ref(std::move(other)),
216 ptr_(other.ptr_)
217 {
218 other.ptr_ = nullptr;
219 }
220 ```
221
222 Not much changed with the typedefs: The traits typedef can be removed; instead there is one for the reference.
223 The `value_type` is now the template parameter directly but the `allocator_type` is defined in the reference through the traits.
224 This allows rebinding to support `Allocator` classes.
225
226 The constructors now take an `allocator_ref` instead of the `allocator_type` directly but otherwise are left unchanged.
227 Note that passing a default-constructed `allocator_type` only compiles for stateless allocators,
228 since the reference does not actually store a reference to them. For stateful allocators it will not compile.
229 Since only the reference is copied and not the allocator there is no need for a special treatment in copying.
230 `create()` no longer needs to take an allocator as reference so this argument can be omitted.
231
232 The destructor has not changed at all; it still only calls the helper function `destroy()`.
233
234 Copy and move assignment operators can now use the copy(move)-and-swap idiom and do not need to worry about all the propagation stuff,
235 since the allocator is held by reference. The same goes for `swap()`, which just swaps the reference and the pointer.
236
237 The accessor functions have not changed, except that the actual pointer type is now simply `T*` and no longer defined in the traits.
238
239 ```cpp
240 template <typename ... Args>
241 T* create(Args&&... args)
242 {
243 auto storage = this->allocate_node(sizeof(T), alignof(T));
244 try
245 {
246 ::new(storage) T(std::forward<Args>(args)...);
247 }
248 catch (...)
249 {
250 this->deallocate_node(storage, sizeof(T), alignof(T));
251 throw;
252 }
253 return static_cast<T*>(storage);
254 }
255
256 void destroy() noexcept
257 {
258 if (ptr_)
259 {
260 ptr_->~T();
261 this->deallocate_node(ptr_, sizeof(T), alignof(T));
262 }
263 }
264 ```
265
266 The helper functions `create()` and `destroy()` now only perform the (de-)allocation through the allocator;
267 the constructor/destructor call is done manually. Note that the pointer returned by `allocate_node()` is `void*`
268 and that you have to explicitly write `this->` due to the template name lookup rules.
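The `this->` is required because `allocator_reference<RawAllocator>` is a base class that depends on a template parameter, so unqualified names are not looked up in it. A minimal illustration of this rule (generic C++, unrelated to the library):

```cpp
template <class Base>
struct derived : Base
{
    void call()
    {
        // g();      // error: unqualified lookup ignores the dependent base
        this->g();   // OK: lookup is deferred until instantiation
    }
};
```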
269
270 This is now the full `RawAllocator` version of `deep_copy_ptr`:
271
272 ```cpp
273 template <typename T, class RawAllocator = memory::default_allocator>
274 class deep_copy_ptr
275 : memory::allocator_reference<RawAllocator>
276 {
277 using allocator_ref = memory::allocator_reference<RawAllocator>;
278 public:
279 using value_type = T;
280 using allocator_type = typename allocator_ref::allocator_type;
281
282 explicit deep_copy_ptr(allocator_ref alloc = allocator_type{})
283 : allocator_ref(alloc), ptr_(nullptr) {}
284
285 deep_copy_ptr(value_type value, allocator_ref alloc = allocator_type{})
286 : allocator_ref(alloc), ptr_(create(std::move(value))) {}
287
288 deep_copy_ptr(const deep_copy_ptr &other)
289 : allocator_ref(other),
290 ptr_(create(*other))
291 {}
292
293 deep_copy_ptr(deep_copy_ptr &&other) noexcept
294 : allocator_ref(std::move(other)),
295 ptr_(other.ptr_)
296 {
297 other.ptr_ = nullptr;
298 }
299
300 ~deep_copy_ptr() noexcept
301 {
302 destroy();
303 }
304
305 deep_copy_ptr& operator=(const deep_copy_ptr &other)
306 {
307 deep_copy_ptr tmp(other);
308 swap(*this, tmp);
309 return *this;
310 }
311
312 deep_copy_ptr& operator=(deep_copy_ptr &&other) noexcept
313 {
314 deep_copy_ptr tmp(std::move(other));
315 swap(*this, tmp);
316 return *this;
317 }
318
319 friend void swap(deep_copy_ptr &a, deep_copy_ptr &b) noexcept
320 {
321 using std::swap;
322 swap(static_cast<allocator_ref&>(a), static_cast<allocator_ref&>(b));
323 swap(a.ptr_, b.ptr_);
324 }
325
326 explicit operator bool() const
327 {
328 return !!ptr_;
329 }
330
331 T& operator*()
332 {
333 return *ptr_;
334 }
335
336 const T& operator*() const
337 {
338 return *ptr_;
339 }
340
341 T* operator->()
342 {
343 return ptr_;
344 }
345
346 const T* operator->() const
347 {
348 return ptr_;
349 }
350
351 private:
352 template <typename ... Args>
353 T* create(Args&&... args)
354 {
355 auto storage = this->allocate_node(sizeof(T), alignof(T));
356 try
357 {
358 ::new(storage) T(std::forward<Args>(args)...);
359 }
360 catch (...)
361 {
362 this->deallocate_node(storage, sizeof(T), alignof(T));
363 throw;
364 }
365 return static_cast<T*>(storage);
366 }
367
368 void destroy() noexcept
369 {
370 if (ptr_)
371 {
372 ptr_->~T();
373 this->deallocate_node(ptr_, sizeof(T), alignof(T));
374 }
375 }
376
377 T *ptr_;
378 };
379 ```
380
381 [default_allocator]: \ref foonathan::memory::default_allocator
382 [allocator_reference]: \ref foonathan::memory::allocator_reference
383 [allocator_traits]: \ref foonathan::memory::allocator_traits
384 [allocator_deallocator]: \ref foonathan::memory::allocator_deallocator
385 [RawAllocator]: md_doc_concepts.html#concept_rawallocator
0 # CMake options for configuring the library
1
2 When installing the library, each build type (`-DCMAKE_BUILD_TYPE=XXX`) allows a separate configuration.
3 The configuration doesn't affect your targets; you can link against any configuration (your compiler might complain, though, if you link your debug target against a library compiled in release mode or vice versa).
4
5 The build types `Debug`, `RelWithDebInfo` and `Release` provide pre-defined values for the debugging options;
6 those cannot be overridden.
7
8 You can create as many build types as you want.
9
10 ## Options
11
12 The following variables are available for configuring it:
13
14 * `COMP_HAS_*`: specifies compatibility options, that is, whether a certain C++ feature is available under your compiler. They are automatically detected by CMake, so there is usually no need to change them.
15
16 * `FOONATHAN_MEMORY_BUILD_EXAMPLES/_TESTS`: whether or not to build the examples or tests. If this is `OFF`, their CMake scripts are not even included. It is `ON` for standalone builds and `OFF` when used via `add_subdirectory()`.
17 * `FOONATHAN_MEMORY_BUILD_TOOLS`: whether or not to build the tools. Unlike the other two options, it is always `ON` by default.
18
19 * `FOONATHAN_MEMORY_EXTERN_TEMPLATE`: If active, the library already provides the definitions of common instantiations of its class templates. This can speed up compilation of user code, since the compiler does not need to generate those definitions each time such an instantiation is used; instead they are compiled once into the library, at the cost of a bigger library binary. Default is `ON`.
20
21 * `FOONATHAN_MEMORY_DEFAULT_ALLOCATOR`: The default allocator used by the higher level allocator classes. One of the low level allocators (see \ref foonathan::memory::default_allocator). Default is `heap_allocator`.
22 * `FOONATHAN_MEMORY_TEMPORARY_STACK_MODE`: The `temporary_allocator` uses a `temporary_stack` for its allocation.
23 This option controls whether, and how, a global per-thread instance of it is managed.
24 If `2`, it is automatically managed and created on demand; if `1`, you need explicit lifetime control through the `temporary_stack_initializer` class; if `0`, no stack is created automatically.
25 Mode `2` has a slight runtime overhead.
26
27 * `FOONATHAN_MEMORY_DEBUG_*`: Specifies debugging options such as pointer checking in `deallocate()` or filling newly allocated memory with values. They are set automatically for certain build types and cannot be overridden: all of them are enabled in `Debug` builds, the faster ones in `RelWithDebInfo` and none in `Release`. See [debugging](md_doc_debug_error.html#debugging) for a detailed description.
28
29 A list of all options with description is generated by calling `cmake -LH`.
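For example, a standalone configure run could set some of them like this (the values are only illustrative):

```sh
mkdir build && cd build
cmake .. -DCMAKE_BUILD_TYPE=Release \
         -DFOONATHAN_MEMORY_BUILD_EXAMPLES=OFF \
         -DFOONATHAN_MEMORY_DEFAULT_ALLOCATOR=new_allocator \
         -DFOONATHAN_MEMORY_TEMPORARY_STACK_MODE=1
```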
30
31 ## Variables and targets
32
33 The following variables or targets are available if used with `add_subdirectory()`:
34
35 * `FOONATHAN_MEMORY_INCLUDE_DIR` (variable): The include directory for the header files.
36 * `FOONATHAN_MEMORY_VERSION[_MAJOR/MINOR]` (variable): Major and minor version of the library.
37 * `foonathan_memory` (target): The target of the library you can link to.
38 * `foonathan_memory_example_*` (target): The targets for the examples. Only available if `FOONATHAN_MEMORY_BUILD_EXAMPLES` is `ON`.
39 * `foonathan_memory_test` (target): The test target. Only available if `FOONATHAN_MEMORY_BUILD_TESTS` is `ON`.
40 * `foonathan_memory_profiling` (target): The profiling target. Only available if `FOONATHAN_MEMORY_BUILD_TESTS` is `ON`.
41 * `foonathan_memory_node_size_debugger` (target): The target that generates the container node size information. Only available if `FOONATHAN_MEMORY_BUILD_TOOLS` is `ON`.
42
43 Also every function from [foonathan/compatibility] is exposed.
44
45 [foonathan/compatibility]: https://github.com/foonathan/compatibility
0 #!/bin/sh
1 # builds documentation and publishes it
2 # run in root of repository, assumes `git worktree add doc/html gh-pages`
3
4 doxygen doc/Doxyfile
5 cd doc/html
6 git add --all
7 git commit -am"Update documentation"
8 git push --force origin gh-pages
9
0 /* The standard CSS for doxygen 1.8.16 */
1
2 body, table, div, p, dl {
3 font: 400 14px/22px Roboto,sans-serif;
4 }
5
6 p.reference, p.definition {
7 font: 400 14px/22px Roboto,sans-serif;
8 }
9
10 /* @group Heading Levels */
11
12 h1.groupheader {
13 font-size: 150%;
14 }
15
16 .title {
17 font: 400 14px/28px Roboto,sans-serif;
18 font-size: 150%;
19 font-weight: bold;
20 margin: 10px 2px;
21 }
22
23 h2.groupheader {
24 border-bottom: 1px solid #879ECB;
25 color: #354C7B;
26 font-size: 150%;
27 font-weight: normal;
28 margin-top: 1.75em;
29 padding-top: 8px;
30 padding-bottom: 4px;
31 width: 100%;
32 }
33
34 h3.groupheader {
35 font-size: 100%;
36 }
37
38 h1, h2, h3, h4, h5, h6 {
39 -webkit-transition: text-shadow 0.5s linear;
40 -moz-transition: text-shadow 0.5s linear;
41 -ms-transition: text-shadow 0.5s linear;
42 -o-transition: text-shadow 0.5s linear;
43 transition: text-shadow 0.5s linear;
44 margin-right: 15px;
45 }
46
47 h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow {
48 text-shadow: 0 0 15px cyan;
49 }
50
51 dt {
52 font-weight: bold;
53 }
54
55 ul.multicol {
56 -moz-column-gap: 1em;
57 -webkit-column-gap: 1em;
58 column-gap: 1em;
59 -moz-column-count: 3;
60 -webkit-column-count: 3;
61 column-count: 3;
62 }
63
64 p.startli, p.startdd {
65 margin-top: 2px;
66 }
67
68 p.starttd {
69 margin-top: 0px;
70 }
71
72 p.endli {
73 margin-bottom: 0px;
74 }
75
76 p.enddd {
77 margin-bottom: 4px;
78 }
79
80 p.endtd {
81 margin-bottom: 2px;
82 }
83
84 p.interli {
85 }
86
87 p.interdd {
88 }
89
90 p.intertd {
91 }
92
93 /* @end */
94
95 caption {
96 font-weight: bold;
97 }
98
99 span.legend {
100 font-size: 70%;
101 text-align: center;
102 }
103
104 h3.version {
105 font-size: 90%;
106 text-align: center;
107 }
108
109 div.qindex, div.navtab{
110 background-color: #EBEFF6;
111 border: 1px solid #A3B4D7;
112 text-align: center;
113 }
114
115 div.qindex, div.navpath {
116 width: 100%;
117 line-height: 140%;
118 }
119
120 div.navtab {
121 margin-right: 15px;
122 }
123
124 /* @group Link Styling */
125
126 a {
127 color: #3D578C;
128 font-weight: normal;
129 text-decoration: none;
130 }
131
132 .contents a:visited {
133 color: #4665A2;
134 }
135
136 a:hover {
137 text-decoration: underline;
138 }
139
140 a.qindex {
141 font-weight: bold;
142 }
143
144 a.qindexHL {
145 font-weight: bold;
146 background-color: #9CAFD4;
147 color: #FFFFFF;
148 border: 1px double #869DCA;
149 }
150
151 .contents a.qindexHL:visited {
152 color: #FFFFFF;
153 }
154
155 a.el {
156 font-weight: bold;
157 }
158
159 a.elRef {
160 }
161
162 a.code, a.code:visited, a.line, a.line:visited {
163 color: #4665A2;
164 }
165
166 a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited {
167 color: #4665A2;
168 }
169
170 /* @end */
171
172 dl.el {
173 margin-left: -1cm;
174 }
175
176 ul {
177 overflow: hidden; /*Fixed: list item bullets overlap floating elements*/
178 }
179
180 #side-nav ul {
181 overflow: visible; /* reset ul rule for scroll bar in GENERATE_TREEVIEW window */
182 }
183
184 #main-nav ul {
185 overflow: visible; /* reset ul rule for the navigation bar drop down lists */
186 }
187
188 .fragment {
189 text-align: left;
190 direction: ltr;
191 overflow-x: auto; /*Fixed: fragment lines overlap floating elements*/
192 overflow-y: hidden;
193 }
194
195 pre.fragment {
196 border: 1px solid #C4CFE5;
197 background-color: #FBFCFD;
198 padding: 4px 6px;
199 margin: 4px 8px 4px 2px;
200 overflow: auto;
201 word-wrap: break-word;
202 font-size: 9pt;
203 line-height: 125%;
204 font-family: monospace, fixed;
205 font-size: 105%;
206 }
207
208 div.fragment {
209 padding: 0 0 1px 0; /*Fixed: last line underline overlap border*/
210 margin: 4px 8px 4px 2px;
211 background-color: #FBFCFD;
212 border: 1px solid #C4CFE5;
213 }
214
215 div.line {
216 font-family: monospace, fixed;
217 font-size: 13px;
218 min-height: 13px;
219 line-height: 1.0;
220 text-wrap: unrestricted;
221 white-space: -moz-pre-wrap; /* Moz */
222 white-space: -pre-wrap; /* Opera 4-6 */
223 white-space: -o-pre-wrap; /* Opera 7 */
224 white-space: pre-wrap; /* CSS3 */
225 word-wrap: break-word; /* IE 5.5+ */
226 text-indent: -53px;
227 padding-left: 53px;
228 padding-bottom: 0px;
229 margin: 0px;
230 -webkit-transition-property: background-color, box-shadow;
231 -webkit-transition-duration: 0.5s;
232 -moz-transition-property: background-color, box-shadow;
233 -moz-transition-duration: 0.5s;
234 -ms-transition-property: background-color, box-shadow;
235 -ms-transition-duration: 0.5s;
236 -o-transition-property: background-color, box-shadow;
237 -o-transition-duration: 0.5s;
238 transition-property: background-color, box-shadow;
239 transition-duration: 0.5s;
240 }
241
242 div.line:after {
243 content:"\000A";
244 white-space: pre;
245 }
246
247 div.line.glow {
248 background-color: cyan;
249 box-shadow: 0 0 10px cyan;
250 }
251
252
253 span.lineno {
254 padding-right: 4px;
255 text-align: right;
256 border-right: 2px solid #0F0;
257 background-color: #E8E8E8;
258 white-space: pre;
259 }
260 span.lineno a {
261 background-color: #D8D8D8;
262 }
263
264 span.lineno a:hover {
265 background-color: #C8C8C8;
266 }
267
268 .lineno {
269 -webkit-touch-callout: none;
270 -webkit-user-select: none;
271 -khtml-user-select: none;
272 -moz-user-select: none;
273 -ms-user-select: none;
274 user-select: none;
275 }
276
277 div.ah, span.ah {
278 background-color: black;
279 font-weight: bold;
280 color: #FFFFFF;
281 margin-bottom: 3px;
282 margin-top: 3px;
283 padding: 0.2em;
284 border: solid thin #333;
285 border-radius: 0.5em;
286 -webkit-border-radius: .5em;
287 -moz-border-radius: .5em;
288 box-shadow: 2px 2px 3px #999;
289 -webkit-box-shadow: 2px 2px 3px #999;
290 -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
291 background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444));
292 background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000 110%);
293 }
294
295 div.classindex ul {
296 list-style: none;
297 padding-left: 0;
298 }
299
300 div.classindex span.ai {
301 display: inline-block;
302 }
303
304 div.groupHeader {
305 margin-left: 16px;
306 margin-top: 12px;
307 font-weight: bold;
308 }
309
310 div.groupText {
311 margin-left: 16px;
312 font-style: italic;
313 }
314
315 body {
316 background-color: white;
317 color: black;
318 margin: 0;
319 }
320
321 div.contents {
322 margin-top: 10px;
323 margin-left: 12px;
324 margin-right: 8px;
325 }
326
327 td.indexkey {
328 background-color: #EBEFF6;
329 font-weight: bold;
330 border: 1px solid #C4CFE5;
331 margin: 2px 0px 2px 0;
332 padding: 2px 10px;
333 white-space: nowrap;
334 vertical-align: top;
335 }
336
337 td.indexvalue {
338 background-color: #EBEFF6;
339 border: 1px solid #C4CFE5;
340 padding: 2px 10px;
341 margin: 2px 0px;
342 }
343
344 tr.memlist {
345 background-color: #EEF1F7;
346 }
347
348 p.formulaDsp {
349 text-align: center;
350 }
351
352 img.formulaDsp {
353
354 }
355
356 img.formulaInl, img.inline {
357 vertical-align: middle;
358 }
359
360 div.center {
361 text-align: center;
362 margin-top: 0px;
363 margin-bottom: 0px;
364 padding: 0px;
365 }
366
367 div.center img {
368 border: 0px;
369 }
370
371 address.footer {
372 text-align: right;
373 padding-right: 12px;
374 }
375
376 img.footer {
377 border: 0px;
378 vertical-align: middle;
379 }
380
381 /* @group Code Colorization */
382
383 span.keyword {
384 color: #008000
385 }
386
387 span.keywordtype {
388 color: #604020
389 }
390
391 span.keywordflow {
392 color: #e08000
393 }
394
395 span.comment {
396 color: #800000
397 }
398
399 span.preprocessor {
400 color: #806020
401 }
402
403 span.stringliteral {
404 color: #002080
405 }
406
407 span.charliteral {
408 color: #008080
409 }
410
411 span.vhdldigit {
412 color: #ff00ff
413 }
414
415 span.vhdlchar {
416 color: #000000
417 }
418
419 span.vhdlkeyword {
420 color: #700070
421 }
422
423 span.vhdllogic {
424 color: #ff0000
425 }
426
427 blockquote {
428 background-color: #F7F8FB;
429 border-left: 2px solid #9CAFD4;
430 margin: 0 24px 0 4px;
431 padding: 0 12px 0 16px;
432 }
433
434 blockquote.DocNodeRTL {
435 border-left: 0;
436 border-right: 2px solid #9CAFD4;
437 margin: 0 4px 0 24px;
438 padding: 0 16px 0 12px;
439 }
440
441 /* @end */
442
443 /*
444 .search {
445 color: #003399;
446 font-weight: bold;
447 }
448
449 form.search {
450 margin-bottom: 0px;
451 margin-top: 0px;
452 }
453
454 input.search {
455 font-size: 75%;
456 color: #000080;
457 font-weight: normal;
458 background-color: #e8eef2;
459 }
460 */
461
462 td.tiny {
463 font-size: 75%;
464 }
465
466 .dirtab {
467 padding: 4px;
468 border-collapse: collapse;
469 border: 1px solid #A3B4D7;
470 }
471
472 th.dirtab {
473 background: #EBEFF6;
474 font-weight: bold;
475 }
476
477 hr {
478 height: 0px;
479 border: none;
480 border-top: 1px solid #4A6AAA;
481 }
482
483 hr.footer {
484 height: 1px;
485 }
486
487 /* @group Member Descriptions */
488
489 table.memberdecls {
490 border-spacing: 0px;
491 padding: 0px;
492 }
493
494 .memberdecls td, .fieldtable tr {
495 -webkit-transition-property: background-color, box-shadow;
496 -webkit-transition-duration: 0.5s;
497 -moz-transition-property: background-color, box-shadow;
498 -moz-transition-duration: 0.5s;
499 -ms-transition-property: background-color, box-shadow;
500 -ms-transition-duration: 0.5s;
501 -o-transition-property: background-color, box-shadow;
502 -o-transition-duration: 0.5s;
503 transition-property: background-color, box-shadow;
504 transition-duration: 0.5s;
505 }
506
507 .memberdecls td.glow, .fieldtable tr.glow {
508 background-color: cyan;
509 box-shadow: 0 0 15px cyan;
510 }
511
512 .mdescLeft, .mdescRight,
513 .memItemLeft, .memItemRight,
514 .memTemplItemLeft, .memTemplItemRight, .memTemplParams {
515 background-color: #F9FAFC;
516 border: none;
517 margin: 4px;
518 padding: 1px 0 0 8px;
519 }
520
521 .mdescLeft, .mdescRight {
522 padding: 0px 8px 4px 8px;
523 color: #555;
524 }
525
526 .memSeparator {
527 border-bottom: 1px solid #DEE4F0;
528 line-height: 1px;
529 margin: 0px;
530 padding: 0px;
531 }
532
533 .memItemLeft, .memTemplItemLeft {
534 white-space: nowrap;
535 }
536
537 .memItemRight {
538 width: 100%;
539 }
540
541 .memTemplParams {
542 color: #4665A2;
543 white-space: nowrap;
544 font-size: 80%;
545 }
546
547 /* @end */
548
549 /* @group Member Details */
550
551 /* Styles for detailed member documentation */
552
553 .memtitle {
554 padding: 8px;
555 border-top: 1px solid #A8B8D9;
556 border-left: 1px solid #A8B8D9;
557 border-right: 1px solid #A8B8D9;
558 border-top-right-radius: 4px;
559 border-top-left-radius: 4px;
560 margin-bottom: -1px;
561 background-image: url('nav_f.png');
562 background-repeat: repeat-x;
563 background-color: #E2E8F2;
564 line-height: 1.25;
565 font-weight: 300;
566 float:left;
567 }
568
569 .permalink
570 {
571 font-size: 65%;
572 display: inline-block;
573 vertical-align: middle;
574 }
575
576 .memtemplate {
577 font-size: 80%;
578 color: #4665A2;
579 font-weight: normal;
580 margin-left: 9px;
581 }
582
583 .memnav {
584 background-color: #EBEFF6;
585 border: 1px solid #A3B4D7;
586 text-align: center;
587 margin: 2px;
588 margin-right: 15px;
589 padding: 2px;
590 }
591
592 .mempage {
593 width: 100%;
594 }
595
596 .memitem {
597 padding: 0;
598 margin-bottom: 10px;
599 margin-right: 5px;
600 -webkit-transition: box-shadow 0.5s linear;
601 -moz-transition: box-shadow 0.5s linear;
602 -ms-transition: box-shadow 0.5s linear;
603 -o-transition: box-shadow 0.5s linear;
604 transition: box-shadow 0.5s linear;
605 display: table !important;
606 width: 100%;
607 }
608
609 .memitem.glow {
610 box-shadow: 0 0 15px cyan;
611 }
612
613 .memname {
614 font-weight: 400;
615 margin-left: 6px;
616 }
617
618 .memname td {
619 vertical-align: bottom;
620 }
621
622 .memproto, dl.reflist dt {
623 border-top: 1px solid #A8B8D9;
624 border-left: 1px solid #A8B8D9;
625 border-right: 1px solid #A8B8D9;
626 padding: 6px 0px 6px 0px;
627 color: #253555;
628 font-weight: bold;
629 text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
630 background-color: #DFE5F1;
631 /* opera specific markup */
632 box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
633 border-top-right-radius: 4px;
634 /* firefox specific markup */
635 -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
636 -moz-border-radius-topright: 4px;
637 /* webkit specific markup */
638 -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
639 -webkit-border-top-right-radius: 4px;
640
641 }
642
643 .overload {
644 font-family: "courier new",courier,monospace;
645 font-size: 65%;
646 }
647
648 .memdoc, dl.reflist dd {
649 border-bottom: 1px solid #A8B8D9;
650 border-left: 1px solid #A8B8D9;
651 border-right: 1px solid #A8B8D9;
652 padding: 6px 10px 2px 10px;
653 background-color: #FBFCFD;
654 border-top-width: 0;
655 background-image:url('nav_g.png');
656 background-repeat:repeat-x;
657 background-color: #FFFFFF;
658 /* opera specific markup */
659 border-bottom-left-radius: 4px;
660 border-bottom-right-radius: 4px;
661 box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
662 /* firefox specific markup */
663 -moz-border-radius-bottomleft: 4px;
664 -moz-border-radius-bottomright: 4px;
665 -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
666 /* webkit specific markup */
667 -webkit-border-bottom-left-radius: 4px;
668 -webkit-border-bottom-right-radius: 4px;
669 -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
670 }
671
672 dl.reflist dt {
673 padding: 5px;
674 }
675
676 dl.reflist dd {
677 margin: 0px 0px 10px 0px;
678 padding: 5px;
679 }
680
681 .paramkey {
682 text-align: right;
683 }
684
685 .paramtype {
686 white-space: nowrap;
687 }
688
689 .paramname {
690 color: #602020;
691 white-space: nowrap;
692 }
693 .paramname em {
694 font-style: normal;
695 }
696 .paramname code {
697 line-height: 14px;
698 }
699
700 .params, .retval, .exception, .tparams {
701 margin-left: 0px;
702 padding-left: 0px;
703 }
704
705 .params .paramname, .retval .paramname, .tparams .paramname, .exception .paramname {
706 font-weight: bold;
707 vertical-align: top;
708 }
709
710 .params .paramtype, .tparams .paramtype {
711 font-style: italic;
712 vertical-align: top;
713 }
714
715 .params .paramdir, .tparams .paramdir {
716 font-family: "courier new",courier,monospace;
717 vertical-align: top;
718 }
719
720 table.mlabels {
721 border-spacing: 0px;
722 }
723
724 td.mlabels-left {
725 width: 100%;
726 padding: 0px;
727 }
728
729 td.mlabels-right {
730 vertical-align: bottom;
731 padding: 0px;
732 white-space: nowrap;
733 }
734
735 span.mlabels {
736 margin-left: 8px;
737 }
738
739 span.mlabel {
740 background-color: #728DC1;
741 border-top:1px solid #5373B4;
742 border-left:1px solid #5373B4;
743 border-right:1px solid #C4CFE5;
744 border-bottom:1px solid #C4CFE5;
745 text-shadow: none;
746 color: white;
747 margin-right: 4px;
748 padding: 2px 3px;
749 border-radius: 3px;
750 font-size: 7pt;
751 white-space: nowrap;
752 vertical-align: middle;
753 }
754
755
756
757 /* @end */
758
759 /* these are for tree view inside a (index) page */
760
761 div.directory {
762 margin: 10px 0px;
763 border-top: 1px solid #9CAFD4;
764 border-bottom: 1px solid #9CAFD4;
765 width: 100%;
766 }
767
768 .directory table {
769 border-collapse:collapse;
770 }
771
772 .directory td {
773 margin: 0px;
774 padding: 0px;
775 vertical-align: top;
776 }
777
778 .directory td.entry {
779 white-space: nowrap;
780 padding-right: 6px;
781 padding-top: 3px;
782 }
783
784 .directory td.entry a {
785 outline:none;
786 }
787
788 .directory td.entry a img {
789 border: none;
790 }
791
792 .directory td.desc {
793 width: 100%;
794 padding-left: 6px;
795 padding-right: 6px;
796 padding-top: 3px;
797 border-left: 1px solid rgba(0,0,0,0.05);
798 }
799
800 .directory tr.even {
801 padding-left: 6px;
802 background-color: #F7F8FB;
803 }
804
805 .directory img {
806 vertical-align: -30%;
807 }
808
809 .directory .levels {
810 white-space: nowrap;
811 width: 100%;
812 text-align: right;
813 font-size: 9pt;
814 }
815
816 .directory .levels span {
817 cursor: pointer;
818 padding-left: 2px;
819 padding-right: 2px;
820 color: #3D578C;
821 }
822
823 .arrow {
824 color: #9CAFD4;
825 -webkit-user-select: none;
826 -khtml-user-select: none;
827 -moz-user-select: none;
828 -ms-user-select: none;
829 user-select: none;
830 cursor: pointer;
831 font-size: 80%;
832 display: inline-block;
833 width: 16px;
834 height: 22px;
835 }
836
837 .icon {
838 font-family: Arial, Helvetica;
839 font-weight: bold;
840 font-size: 12px;
841 height: 14px;
842 width: 16px;
843 display: inline-block;
844 background-color: #728DC1;
845 color: white;
846 text-align: center;
847 border-radius: 4px;
848 margin-left: 2px;
849 margin-right: 2px;
850 }
851
852 .icona {
853 width: 24px;
854 height: 22px;
855 display: inline-block;
856 }
857
858 .iconfopen {
859 width: 24px;
860 height: 18px;
861 margin-bottom: 4px;
862 background-image:url('folderopen.png');
863 background-position: 0px -4px;
864 background-repeat: repeat-y;
865 vertical-align:top;
866 display: inline-block;
867 }
868
869 .iconfclosed {
870 width: 24px;
871 height: 18px;
872 margin-bottom: 4px;
873 background-image:url('folderclosed.png');
874 background-position: 0px -4px;
875 background-repeat: repeat-y;
876 vertical-align:top;
877 display: inline-block;
878 }
879
880 .icondoc {
881 width: 24px;
882 height: 18px;
883 margin-bottom: 4px;
884 background-image:url('doc.png');
885 background-position: 0px -4px;
886 background-repeat: repeat-y;
887 vertical-align:top;
888 display: inline-block;
889 }
890
891 table.directory {
892 font: 400 14px Roboto,sans-serif;
893 }
894
895 /* @end */
896
897 div.dynheader {
898 margin-top: 8px;
899 -webkit-touch-callout: none;
900 -webkit-user-select: none;
901 -khtml-user-select: none;
902 -moz-user-select: none;
903 -ms-user-select: none;
904 user-select: none;
905 }
906
907 address {
908 font-style: normal;
909 color: #2A3D61;
910 }
911
912 table.doxtable caption {
913 caption-side: top;
914 }
915
916 table.doxtable {
917 border-collapse:collapse;
918 margin-top: 4px;
919 margin-bottom: 4px;
920 }
921
922 table.doxtable td, table.doxtable th {
923 border: 1px solid #2D4068;
924 padding: 3px 7px 2px;
925 }
926
927 table.doxtable th {
928 background-color: #374F7F;
929 color: #FFFFFF;
930 font-size: 110%;
931 padding-bottom: 4px;
932 padding-top: 5px;
933 }
934
935 table.fieldtable {
936 /*width: 100%;*/
937 margin-bottom: 10px;
938 border: 1px solid #A8B8D9;
939 border-spacing: 0px;
940 -moz-border-radius: 4px;
941 -webkit-border-radius: 4px;
942 border-radius: 4px;
943 -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
944 -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
945 box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
946 }
947
948 .fieldtable td, .fieldtable th {
949 padding: 3px 7px 2px;
950 }
951
952 .fieldtable td.fieldtype, .fieldtable td.fieldname {
953 white-space: nowrap;
954 border-right: 1px solid #A8B8D9;
955 border-bottom: 1px solid #A8B8D9;
956 vertical-align: top;
957 }
958
959 .fieldtable td.fieldname {
960 padding-top: 3px;
961 }
962
963 .fieldtable td.fielddoc {
964 border-bottom: 1px solid #A8B8D9;
965 /*width: 100%;*/
966 }
967
968 .fieldtable td.fielddoc p:first-child {
969 margin-top: 0px;
970 }
971
972 .fieldtable td.fielddoc p:last-child {
973 margin-bottom: 2px;
974 }
975
976 .fieldtable tr:last-child td {
977 border-bottom: none;
978 }
979
980 .fieldtable th {
981 background-image:url('nav_f.png');
982 background-repeat:repeat-x;
983 background-color: #E2E8F2;
984 font-size: 90%;
985 color: #253555;
986 padding-bottom: 4px;
987 padding-top: 5px;
988 text-align:left;
989 font-weight: 400;
990 -moz-border-radius-topleft: 4px;
991 -moz-border-radius-topright: 4px;
992 -webkit-border-top-left-radius: 4px;
993 -webkit-border-top-right-radius: 4px;
994 border-top-left-radius: 4px;
995 border-top-right-radius: 4px;
996 border-bottom: 1px solid #A8B8D9;
997 }
998
999
1000 .tabsearch {
1001 top: 0px;
1002 left: 10px;
1003 height: 36px;
1004 background-image: url('tab_b.png');
1005 z-index: 101;
1006 overflow: hidden;
1007 font-size: 13px;
1008 }
1009
1010 .navpath ul
1011 {
1012 font-size: 11px;
1013 background-image:url('tab_b.png');
1014 background-repeat:repeat-x;
1015 background-position: 0 -5px;
1016 height:30px;
1017 line-height:30px;
1018 color:#8AA0CC;
1019 border:solid 1px #C2CDE4;
1020 overflow:hidden;
1021 margin:0px;
1022 padding:0px;
1023 }
1024
1025 .navpath li
1026 {
1027 list-style-type:none;
1028 float:left;
1029 padding-left:10px;
1030 padding-right:15px;
1031 background-image:url('bc_s.png');
1032 background-repeat:no-repeat;
1033 background-position:right;
1034 color:#364D7C;
1035 }
1036
1037 .navpath li.navelem a
1038 {
1039 height:32px;
1040 display:block;
1041 text-decoration: none;
1042 outline: none;
1043 color: #283A5D;
1044 font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
1045 text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
1046 text-decoration: none;
1047 }
1048
1049 .navpath li.navelem a:hover
1050 {
1051 color:#6884BD;
1052 }
1053
1054 .navpath li.footer
1055 {
1056 list-style-type:none;
1057 float:right;
1058 padding-left:10px;
1059 padding-right:15px;
1060 background-image:none;
1061 background-repeat:no-repeat;
1062 background-position:right;
1063 color:#364D7C;
1064 font-size: 8pt;
1065 }
1066
1067
1068 div.summary
1069 {
1070 float: right;
1071 font-size: 8pt;
1072 padding-right: 5px;
1073 width: 50%;
1074 text-align: right;
1075 }
1076
1077 div.summary a
1078 {
1079 white-space: nowrap;
1080 }
1081
1082 table.classindex
1083 {
1084 margin: 10px;
1085 white-space: nowrap;
1086 margin-left: 3%;
1087 margin-right: 3%;
1088 width: 94%;
1089 border: 0;
1090 border-spacing: 0;
1091 padding: 0;
1092 }
1093
1094 div.ingroups
1095 {
1096 font-size: 8pt;
1097 width: 50%;
1098 text-align: left;
1099 }
1100
1101 div.ingroups a
1102 {
1103 white-space: nowrap;
1104 }
1105
1106 div.header
1107 {
1108 background-image:url('nav_h.png');
1109 background-repeat:repeat-x;
1110 background-color: #F9FAFC;
1111 margin: 0px;
1112 border-bottom: 1px solid #C4CFE5;
1113 }
1114
1115 div.headertitle
1116 {
1117 padding: 5px 5px 5px 10px;
1118 }
1119
1120 .PageDocRTL-title div.headertitle {
1121 text-align: right;
1122 direction: rtl;
1123 }
1124
1125 dl {
1126 padding: 0 0 0 0;
1127 }
1128
1129 /* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug, dl.examples */
1130 dl.section {
1131 margin-left: 0px;
1132 padding-left: 0px;
1133 }
1134
1135 dl.section.DocNodeRTL {
1136 margin-right: 0px;
1137 padding-right: 0px;
1138 }
1139
1140 dl.note {
1141 margin-left: -7px;
1142 padding-left: 3px;
1143 border-left: 4px solid;
1144 border-color: #D0C000;
1145 }
1146
1147 dl.note.DocNodeRTL {
1148 margin-left: 0;
1149 padding-left: 0;
1150 border-left: 0;
1151 margin-right: -7px;
1152 padding-right: 3px;
1153 border-right: 4px solid;
1154 border-color: #D0C000;
1155 }
1156
1157 dl.warning, dl.attention {
1158 margin-left: -7px;
1159 padding-left: 3px;
1160 border-left: 4px solid;
1161 border-color: #FF0000;
1162 }
1163
1164 dl.warning.DocNodeRTL, dl.attention.DocNodeRTL {
1165 margin-left: 0;
1166 padding-left: 0;
1167 border-left: 0;
1168 margin-right: -7px;
1169 padding-right: 3px;
1170 border-right: 4px solid;
1171 border-color: #FF0000;
1172 }
1173
1174 dl.pre, dl.post, dl.invariant {
1175 margin-left: -7px;
1176 padding-left: 3px;
1177 border-left: 4px solid;
1178 border-color: #00D000;
1179 }
1180
1181 dl.pre.DocNodeRTL, dl.post.DocNodeRTL, dl.invariant.DocNodeRTL {
1182 margin-left: 0;
1183 padding-left: 0;
1184 border-left: 0;
1185 margin-right: -7px;
1186 padding-right: 3px;
1187 border-right: 4px solid;
1188 border-color: #00D000;
1189 }
1190
1191 dl.deprecated {
1192 margin-left: -7px;
1193 padding-left: 3px;
1194 border-left: 4px solid;
1195 border-color: #505050;
1196 }
1197
1198 dl.deprecated.DocNodeRTL {
1199 margin-left: 0;
1200 padding-left: 0;
1201 border-left: 0;
1202 margin-right: -7px;
1203 padding-right: 3px;
1204 border-right: 4px solid;
1205 border-color: #505050;
1206 }
1207
1208 dl.todo {
1209 margin-left: -7px;
1210 padding-left: 3px;
1211 border-left: 4px solid;
1212 border-color: #00C0E0;
1213 }
1214
1215 dl.todo.DocNodeRTL {
1216 margin-left: 0;
1217 padding-left: 0;
1218 border-left: 0;
1219 margin-right: -7px;
1220 padding-right: 3px;
1221 border-right: 4px solid;
1222 border-color: #00C0E0;
1223 }
1224
1225 dl.test {
1226 margin-left: -7px;
1227 padding-left: 3px;
1228 border-left: 4px solid;
1229 border-color: #3030E0;
1230 }
1231
1232 dl.test.DocNodeRTL {
1233 margin-left: 0;
1234 padding-left: 0;
1235 border-left: 0;
1236 margin-right: -7px;
1237 padding-right: 3px;
1238 border-right: 4px solid;
1239 border-color: #3030E0;
1240 }
1241
1242 dl.bug {
1243 margin-left: -7px;
1244 padding-left: 3px;
1245 border-left: 4px solid;
1246 border-color: #C08050;
1247 }
1248
1249 dl.bug.DocNodeRTL {
1250 margin-left: 0;
1251 padding-left: 0;
1252 border-left: 0;
1253 margin-right: -7px;
1254 padding-right: 3px;
1255 border-right: 4px solid;
1256 border-color: #C08050;
1257 }
1258
1259 dl.section dd {
1260 margin-bottom: 6px;
1261 }
1262
1263
1264 #projectlogo
1265 {
1266 text-align: center;
1267 vertical-align: bottom;
1268 border-collapse: separate;
1269 }
1270
1271 #projectlogo img
1272 {
1273 border: 0px none;
1274 }
1275
1276 #projectalign
1277 {
1278 vertical-align: middle;
1279 }
1280
1281 #projectname
1282 {
1283 font: 300% Tahoma, Arial,sans-serif;
1284 margin: 0px;
1285 padding: 2px 0px;
1286 }
1287
1288 #projectbrief
1289 {
1290 font: 120% Tahoma, Arial,sans-serif;
1291 margin: 0px;
1292 padding: 0px;
1293 }
1294
1295 #projectnumber
1296 {
1297 font: 50% Tahoma, Arial,sans-serif;
1298 margin: 0px;
1299 padding: 0px;
1300 }
1301
1302 #titlearea
1303 {
1304 padding: 0px;
1305 margin: 0px;
1306 width: 100%;
1307 border-bottom: 1px solid #5373B4;
1308 }
1309
1310 .image
1311 {
1312 text-align: center;
1313 }
1314
1315 .dotgraph
1316 {
1317 text-align: center;
1318 }
1319
1320 .mscgraph
1321 {
1322 text-align: center;
1323 }
1324
1325 .plantumlgraph
1326 {
1327 text-align: center;
1328 }
1329
1330 .diagraph
1331 {
1332 text-align: center;
1333 }
1334
1335 .caption
1336 {
1337 font-weight: bold;
1338 }
1339
1340 div.zoom
1341 {
1342 border: 1px solid #90A5CE;
1343 }
1344
1345 dl.citelist {
1346 margin-bottom:50px;
1347 }
1348
1349 dl.citelist dt {
1350 color:#334975;
1351 float:left;
1352 font-weight:bold;
1353 margin-right:10px;
1354 padding:5px;
1355 }
1356
1357 dl.citelist dd {
1358 margin:2px 0;
1359 padding:5px 0;
1360 }
1361
1362 div.toc {
1363 padding: 14px 25px;
1364 background-color: #F4F6FA;
1365 border: 1px solid #D8DFEE;
1366 border-radius: 7px 7px 7px 7px;
1367 float: right;
1368 height: auto;
1369 margin: 0 8px 10px 10px;
1370 width: 200px;
1371 }
1372
1373 .PageDocRTL-title div.toc {
1374 float: left !important;
1375 text-align: right;
1376 }
1377
1378 div.toc li {
1379 background: url("bdwn.png") no-repeat scroll 0 5px transparent;
1380 font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif;
1381 margin-top: 5px;
1382 padding-left: 10px;
1383 padding-top: 2px;
1384 }
1385
1386 .PageDocRTL-title div.toc li {
1387 background-position-x: right !important;
1388 padding-left: 0 !important;
1389 padding-right: 10px;
1390 }
1391
1392 div.toc h3 {
1393 font: bold 12px/1.2 Arial,FreeSans,sans-serif;
1394 color: #4665A2;
1395 border-bottom: 0 none;
1396 margin: 0;
1397 }
1398
1399 div.toc ul {
1400 list-style: none outside none;
1401 border: medium none;
1402 padding: 0px;
1403 }
1404
1405 div.toc li.level1 {
1406 margin-left: 0px;
1407 }
1408
1409 div.toc li.level2 {
1410 margin-left: 15px;
1411 }
1412
1413 div.toc li.level3 {
1414 margin-left: 30px;
1415 }
1416
1417 div.toc li.level4 {
1418 margin-left: 45px;
1419 }
1420
1421 .PageDocRTL-title div.toc li.level1 {
1422 margin-left: 0 !important;
1423 margin-right: 0;
1424 }
1425
1426 .PageDocRTL-title div.toc li.level2 {
1427 margin-left: 0 !important;
1428 margin-right: 15px;
1429 }
1430
1431 .PageDocRTL-title div.toc li.level3 {
1432 margin-left: 0 !important;
1433 margin-right: 30px;
1434 }
1435
1436 .PageDocRTL-title div.toc li.level4 {
1437 margin-left: 0 !important;
1438 margin-right: 45px;
1439 }
1440
1441 .inherit_header {
1442 font-weight: bold;
1443 color: gray;
1444 cursor: pointer;
1445 -webkit-touch-callout: none;
1446 -webkit-user-select: none;
1447 -khtml-user-select: none;
1448 -moz-user-select: none;
1449 -ms-user-select: none;
1450 user-select: none;
1451 }
1452
1453 .inherit_header td {
1454 padding: 6px 0px 2px 5px;
1455 }
1456
1457 .inherit {
1458 display: none;
1459 }
1460
1461 tr.heading h2 {
1462 margin-top: 12px;
1463 margin-bottom: 4px;
1464 }
1465
1466 /* tooltip related style info */
1467
1468 .ttc {
1469 position: absolute;
1470 display: none;
1471 }
1472
1473 #powerTip {
1474 cursor: default;
1475 white-space: nowrap;
1476 background-color: white;
1477 border: 1px solid gray;
1478 border-radius: 4px 4px 4px 4px;
1479 box-shadow: 1px 1px 7px gray;
1480 display: none;
1481 font-size: smaller;
1482 max-width: 80%;
1483 opacity: 0.9;
1484 padding: 1ex 1em 1em;
1485 position: absolute;
1486 z-index: 2147483647;
1487 }
1488
1489 #powerTip div.ttdoc {
1490 color: grey;
1491 font-style: italic;
1492 }
1493
1494 #powerTip div.ttname a {
1495 font-weight: bold;
1496 }
1497
1498 #powerTip div.ttname {
1499 font-weight: bold;
1500 }
1501
1502 #powerTip div.ttdeci {
1503 color: #006318;
1504 }
1505
1506 #powerTip div {
1507 margin: 0px;
1508 padding: 0px;
1509 font: 12px/16px Roboto,sans-serif;
1510 }
1511
1512 #powerTip:before, #powerTip:after {
1513 content: "";
1514 position: absolute;
1515 margin: 0px;
1516 }
1517
1518 #powerTip.n:after, #powerTip.n:before,
1519 #powerTip.s:after, #powerTip.s:before,
1520 #powerTip.w:after, #powerTip.w:before,
1521 #powerTip.e:after, #powerTip.e:before,
1522 #powerTip.ne:after, #powerTip.ne:before,
1523 #powerTip.se:after, #powerTip.se:before,
1524 #powerTip.nw:after, #powerTip.nw:before,
1525 #powerTip.sw:after, #powerTip.sw:before {
1526 border: solid transparent;
1527 content: " ";
1528 height: 0;
1529 width: 0;
1530 position: absolute;
1531 }
1532
1533 #powerTip.n:after, #powerTip.s:after,
1534 #powerTip.w:after, #powerTip.e:after,
1535 #powerTip.nw:after, #powerTip.ne:after,
1536 #powerTip.sw:after, #powerTip.se:after {
1537 border-color: rgba(255, 255, 255, 0);
1538 }
1539
1540 #powerTip.n:before, #powerTip.s:before,
1541 #powerTip.w:before, #powerTip.e:before,
1542 #powerTip.nw:before, #powerTip.ne:before,
1543 #powerTip.sw:before, #powerTip.se:before {
1544 border-color: rgba(128, 128, 128, 0);
1545 }
1546
1547 #powerTip.n:after, #powerTip.n:before,
1548 #powerTip.ne:after, #powerTip.ne:before,
1549 #powerTip.nw:after, #powerTip.nw:before {
1550 top: 100%;
1551 }
1552
1553 #powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after {
1554 border-top-color: #FFFFFF;
1555 border-width: 10px;
1556 margin: 0px -10px;
1557 }
1558 #powerTip.n:before {
1559 border-top-color: #808080;
1560 border-width: 11px;
1561 margin: 0px -11px;
1562 }
1563 #powerTip.n:after, #powerTip.n:before {
1564 left: 50%;
1565 }
1566
1567 #powerTip.nw:after, #powerTip.nw:before {
1568 right: 14px;
1569 }
1570
1571 #powerTip.ne:after, #powerTip.ne:before {
1572 left: 14px;
1573 }
1574
1575 #powerTip.s:after, #powerTip.s:before,
1576 #powerTip.se:after, #powerTip.se:before,
1577 #powerTip.sw:after, #powerTip.sw:before {
1578 bottom: 100%;
1579 }
1580
1581 #powerTip.s:after, #powerTip.se:after, #powerTip.sw:after {
1582 border-bottom-color: #FFFFFF;
1583 border-width: 10px;
1584 margin: 0px -10px;
1585 }
1586
1587 #powerTip.s:before, #powerTip.se:before, #powerTip.sw:before {
1588 border-bottom-color: #808080;
1589 border-width: 11px;
1590 margin: 0px -11px;
1591 }
1592
1593 #powerTip.s:after, #powerTip.s:before {
1594 left: 50%;
1595 }
1596
1597 #powerTip.sw:after, #powerTip.sw:before {
1598 right: 14px;
1599 }
1600
1601 #powerTip.se:after, #powerTip.se:before {
1602 left: 14px;
1603 }
1604
1605 #powerTip.e:after, #powerTip.e:before {
1606 left: 100%;
1607 }
1608 #powerTip.e:after {
1609 border-left-color: #FFFFFF;
1610 border-width: 10px;
1611 top: 50%;
1612 margin-top: -10px;
1613 }
1614 #powerTip.e:before {
1615 border-left-color: #808080;
1616 border-width: 11px;
1617 top: 50%;
1618 margin-top: -11px;
1619 }
1620
1621 #powerTip.w:after, #powerTip.w:before {
1622 right: 100%;
1623 }
1624 #powerTip.w:after {
1625 border-right-color: #FFFFFF;
1626 border-width: 10px;
1627 top: 50%;
1628 margin-top: -10px;
1629 }
1630 #powerTip.w:before {
1631 border-right-color: #808080;
1632 border-width: 11px;
1633 top: 50%;
1634 margin-top: -11px;
1635 }
1636
1637 @media print
1638 {
1639 #top { display: none; }
1640 #side-nav { display: none; }
1641 #nav-path { display: none; }
1642 body { overflow:visible; }
1643 h1, h2, h3, h4, h5, h6 { page-break-after: avoid; }
1644 .summary { display: none; }
1645 .memitem { page-break-inside: avoid; }
1646 #doc-content
1647 {
1648 margin-left:0 !important;
1649 height:auto !important;
1650 width:auto !important;
1651 overflow:inherit;
1652 display:inline;
1653 }
1654 }
1655
1656 /* @group Markdown */
1657
1658 /*
1659 table.markdownTable {
1660 border-collapse:collapse;
1661 margin-top: 4px;
1662 margin-bottom: 4px;
1663 }
1664
1665 table.markdownTable td, table.markdownTable th {
1666 border: 1px solid #2D4068;
1667 padding: 3px 7px 2px;
1668 }
1669
1670 table.markdownTableHead tr {
1671 }
1672
1673 table.markdownTableBodyLeft td, table.markdownTable th {
1674 border: 1px solid #2D4068;
1675 padding: 3px 7px 2px;
1676 }
1677
1678 th.markdownTableHeadLeft th.markdownTableHeadRight th.markdownTableHeadCenter th.markdownTableHeadNone {
1679 background-color: #374F7F;
1680 color: #FFFFFF;
1681 font-size: 110%;
1682 padding-bottom: 4px;
1683 padding-top: 5px;
1684 }
1685
1686 th.markdownTableHeadLeft {
1687 text-align: left
1688 }
1689
1690 th.markdownTableHeadRight {
1691 text-align: right
1692 }
1693
1694 th.markdownTableHeadCenter {
1695 text-align: center
1696 }
1697 */
1698
1699 table.markdownTable {
1700 border-collapse:collapse;
1701 margin-top: 4px;
1702 margin-bottom: 4px;
1703 }
1704
1705 table.markdownTable td, table.markdownTable th {
1706 border: 1px solid #2D4068;
1707 padding: 3px 7px 2px;
1708 }
1709
1710 table.markdownTable tr {
1711 }
1712
1713 th.markdownTableHeadLeft, th.markdownTableHeadRight, th.markdownTableHeadCenter, th.markdownTableHeadNone {
1714 background-color: #374F7F;
1715 color: #FFFFFF;
1716 font-size: 110%;
1717 padding-bottom: 4px;
1718 padding-top: 5px;
1719 }
1720
1721 th.markdownTableHeadLeft, td.markdownTableBodyLeft {
1722 text-align: left
1723 }
1724
1725 th.markdownTableHeadRight, td.markdownTableBodyRight {
1726 text-align: right
1727 }
1728
1729 th.markdownTableHeadCenter, td.markdownTableBodyCenter {
1730 text-align: center
1731 }
1732
1733 .DocNodeRTL {
1734 text-align: right;
1735 direction: rtl;
1736 }
1737
1738 .DocNodeLTR {
1739 text-align: left;
1740 direction: ltr;
1741 }
1742
1743 table.DocNodeRTL {
1744 width: auto;
1745 margin-right: 0;
1746 margin-left: auto;
1747 }
1748
1749 table.DocNodeLTR {
1750 width: auto;
1751 margin-right: auto;
1752 margin-left: 0;
1753 }
1754
1755 tt, code, kbd, samp
1756 {
1757 display: inline-block;
1758 direction:ltr;
1759 }
1760 /* @end */
1761
1762 u {
1763 text-decoration: underline;
1764 }
1765
0 # Tutorial
1
2 ## Installation and Setup
3
4 * [Installation Guide](md_doc_installation.html)
5 * [CMake options](md_doc_options.html)
6
7 ## Using RawAllocator classes
8
9 * [Using RawAllocators in STL containers, smart pointers, etc.](md_doc_external_usage.html)
10 * [Allocator adapters and storage classes](md_doc_adapters_storage.html)
11 * [Writing classes using a RawAllocator](md_doc_internal_usage.html)
12
13 ## Advanced usage
14
15 * [Writing your own RawAllocator classes](md_doc_writing_allocators.html)
16 * [Debugging options and error handling](md_doc_debug_error.html)
17 * [Concepts](md_doc_concepts.html)
0 # Writing your own RawAllocator classes
1
2 There are, in general, three different ways and one special case to write a [RawAllocator] class.
3 See the link for the exact requirements and behavior for each function.
4
5 ## 0. Write a normal Allocator class
6
7 Just go ahead and write a normal `Allocator` class. It will work just fine and can be used anywhere a [RawAllocator] is required.
8 Keep in mind, though, that the `construct` or `destroy` functions will not be called and its pointer typedefs will not be used.
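For instance, the type-erased reference from the library accepts a standard allocator directly (the `allocator_storage` example in this repository does the same):

```cpp
// std::allocator is a normal Allocator and therefore also a RawAllocator
memory::any_allocator_reference any(std::allocator<char>{});
```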
9
10 ## 1. Fulfill the requirements for the default allocator_traits
11
12 This is the easiest way. The default specialization of [allocator_traits] will forward to member functions if they exist,
13 and it provides fallbacks if they don't.
14 The following class overrides all the fallbacks:
15
16 ```cpp
17 struct raw_allocator
18 {
19 using is_stateful = std::integral_constant<bool, Value>;
20
21 void* allocate_node(std::size_t size, std::size_t alignment);
22 void deallocate_node(void *node, std::size_t size, std::size_t alignment) noexcept;
23
24 void* allocate_array(std::size_t count, std::size_t size, std::size_t alignment);
25 void deallocate_array(void *ptr, std::size_t count, std::size_t size, std::size_t alignment) noexcept;
26
27 std::size_t max_node_size() const;
28 std::size_t max_array_size() const;
29 std::size_t max_alignment() const;
30 };
31 ```
32
33 There are fallbacks for every function except `allocate_node()` and `deallocate_node()`.
34 A minimal class thus only needs to provide those two functions.
35 The fallbacks "do the right thing": for example, `allocate_array()` forwards to `allocate_node()`, `is_stateful` is determined via `std::is_empty`,
36 and `max_node_size()` returns the maximum possible value.
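A minimal allocator along these lines could look like this (a sketch, not a class from the library; it assumes the requested alignment never exceeds what `std::malloc` guarantees):

```cpp
#include <cstdlib> // std::malloc, std::free
#include <new>     // std::bad_alloc

struct minimal_allocator
{
    // the only two functions without fallbacks:
    void* allocate_node(std::size_t size, std::size_t /*alignment*/)
    {
        auto mem = std::malloc(size);
        if (!mem)
            throw std::bad_alloc();
        return mem;
    }

    void deallocate_node(void* node, std::size_t, std::size_t) noexcept
    {
        std::free(node);
    }
};
```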
37
38 Keep in mind that a [RawAllocator] has to be nothrow moveable and has to be usable as a non-polymorphic base class,
39 i.e. as a `private` base to enable EBO (empty base optimization).
40
41 The full interface is provided by the [allocator_storage] typedefs.
42 Other classes where this approach is used are [heap_allocator] and [aligned_allocator].
43 The latter also provides the full interface.
44
45 ## 2. Specialize the allocator_traits
46
47 But sometimes it is not attractive to provide the full interface.
48 An example is the library class [memory_stack].
49 Its interface consists of typical behaviors required for a stack, like unwinding,
50 and it does not make sense to provide a `deallocate_node()` function for it since there is no direct way to do so - only via unwinding.
51
52 In this case, the [allocator_traits] can be specialized for your type.
53 Keep in mind that it is in the sub-namespace `memory` of the namespace `foonathan`.
54 It needs to provide the following interface:
55
56 ```cpp
57 template <>
58 class allocator_traits<raw_allocator>
59 {
60 public:
61 using allocator_type = raw_allocator;
62 using is_stateful = std::integral_constant<bool, Value>;
63
64 static void* allocate_node(allocator_type &state, std::size_t size, std::size_t alignment);
65 static void deallocate_node(allocator_type &state, void *node, std::size_t size, std::size_t alignment) noexcept;
66
67 static void* allocate_array(allocator_type &state, std::size_t count, std::size_t size, std::size_t alignment);
68 static void deallocate_array(allocator_type &state, void *array, std::size_t count, std::size_t size, std::size_t alignment) noexcept;
69
70 static std::size_t max_node_size(const allocator_type &state);
71 static std::size_t max_array_size(const allocator_type &state);
72 static std::size_t max_alignment(const allocator_type &state);
73 };
74 ```
75
76 This approach is used in the aforementioned [memory_stack] but also in the [memory_pool] classes.
77
78 ## 3. Forward all behavior to another class
79
80 The [allocator_traits] provides a typedef `allocator_type`.
81 This type is the actual type used for the (de-)allocation and will be stored in all classes taking a [RawAllocator].
82 Its only requirement is that it is implicitly constructible from the actual type instantiated and that it is a [RawAllocator].
83
84 The main use for this typedef is to support `Allocator` classes.
85 They need to be rebound to `char` to allow byte-sized allocations before they are actually used.
86
87 Using this technique otherwise is rather esoteric and I do not see any reason for it, but it is possible.
88 Let there be a class `raw_allocator` that is a [RawAllocator], i.e. it provides the appropriate traits interface using any of the mentioned ways.
89 This class also provides a constructor taking the class `my_allocator` that wants to forward to it.
90 Then you only need to write:
91
92 ```cpp
93 class my_allocator {...};
94
95 class raw_allocator
96 {
97 public:
98 raw_allocator(my_allocator &)
99 {
100 ...
101 }
102
103 // provides the required interface or has a traits specialization
104 };
105
106 ...
107 template <>
108 class allocator_traits<my_allocator>
109 : public allocator_traits<raw_allocator>
110 {};
111 ```
112
113 [allocator_traits]: \ref foonathan::memory::allocator_traits
114 [allocator_storage]: \ref foonathan::memory::allocator_storage
115 [aligned_allocator]: \ref foonathan::memory::aligned_allocator
116 [heap_allocator]: \ref foonathan::memory::heap_allocator
117 [memory_stack]: \ref foonathan::memory::memory_stack
118 [memory_pool]: \ref foonathan::memory::memory_pool
119 [RawAllocator]: md_doc_concepts.html#concept_rawallocator
0 # Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 # This file is subject to the license terms in the LICENSE file
2 # found in the top-level directory of this distribution.
3
4 # builds examples
5
6 macro(_foonathan_add_example name)
7 add_executable(foonathan_memory_example_${name} ${name}.cpp)
8 target_link_libraries(foonathan_memory_example_${name} PRIVATE foonathan_memory)
9 endmacro()
10
11 _foonathan_add_example(allocator_storage)
12 _foonathan_add_example(joint_allocation)
13 _foonathan_add_example(taking_allocators)
14
15 if(NOT FOONATHAN_MEMORY_NO_NODE_SIZE)
16 _foonathan_add_example(tracking)
17 _foonathan_add_example(using_allocators)
18 endif()
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 // this example shows how to store allocators by reference and type-erased
5 // see http://foonathan.github.io/doc/memory/md_doc_adapters_storage.html for further details
6
7 #include <iostream>
8 #include <memory>
9
10 #include <foonathan/memory/allocator_storage.hpp> // allocator_reference, any_allocator_reference
11 #include <foonathan/memory/heap_allocator.hpp> // heap_allocator
12 #include <foonathan/memory/memory_stack.hpp> // memory_stack
13
14 // alias namespace foonathan::memory as memory for easier access
15 #include <foonathan/memory/namespace_alias.hpp>
16
17 template <class RawAllocator>
18 void do_sth(memory::allocator_reference<RawAllocator> ref);
19
20 int main()
21 {
22 using namespace memory::literals;
23
24 // storing stateless allocator by reference
25 // heap_allocator is stateless so it does not need to be actually referenced
26 // the reference can take it as a temporary and construct it on the fly
27 memory::allocator_reference<memory::heap_allocator> ref_stateless(memory::heap_allocator{});
28 do_sth(ref_stateless);
29
30 // create a memory_stack
31 // allocates a memory block - initially 4KiB big - and allocates from it in a stack-like manner
32 // deallocation is only done via unwinding to a previously queried marker
33 memory::memory_stack<> stack(4_KiB);
34
35 // storing stateful allocator by reference
36 // memory_stack is stateful and thus the reference actually takes the address of the object
37 // the user has to ensure that the referenced object lives long enough
38 memory::allocator_reference<memory::memory_stack<>> ref_stateful(stack);
39 do_sth(ref_stateful);
40
41 // storing a reference type-erased
42 // any_allocator_reference is an alias for allocator_reference<any_allocator>
43 // it triggers a specialization that uses type-erasure
44 // the tag type can be passed to any class that uses an allocator_reference internally,
45 // like std_allocator or the deep_copy_ptr from the other example
46 memory::any_allocator_reference any1(
47 ref_stateful); // initialize with another allocator reference, will "unwrap"
48 do_sth(any1);
49
50 memory::any_allocator_reference any2(stack); // initialize with a "normal" RawAllocator
51 do_sth(any2);
52
53 memory::any_allocator_reference any3(
54 std::allocator<char>{}); // normal Allocators are RawAllocators, too, so this works
55 do_sth(any3);
56 }
57
58 template <class RawAllocator>
59 void do_sth(memory::allocator_reference<RawAllocator> ref)
60 {
61 // ref is a full-blown RawAllocator that provides all member functions,
62 // so there is no need to use the allocator_traits
63
64 auto node = ref.allocate_node(sizeof(int), alignof(int));
65 std::cout << "Got memory for an int " << node << '\n';
66 ref.deallocate_node(node, sizeof(int), alignof(int));
67 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 // this example shows how to use the joint memory facilities
5 // they allow sharing the same memory for the dynamic allocation of an object
6 // and the dynamic allocations its members make
7
8 #include <iostream>
9
10 #include <foonathan/memory/container.hpp> // string
11 #include <foonathan/memory/default_allocator.hpp> // default_allocator
12 #include <foonathan/memory/joint_allocator.hpp> // joint_type, joint_ptr, joint_allocator, joint_array, ...
13
14 // alias namespace foonathan::memory as memory for easier access
15 #include <foonathan/memory/namespace_alias.hpp>
16
17 // define our joint type
18 // we need to inherit from memory::joint_type<T>
19 // the base class injects two pointers for managing the joint memory
20 // and also disables default copy/move semantics
21 // as well as regular creation
22 struct my_type : memory::joint_type<my_type>
23 {
24 // we can define arbitrary members here
25 // in order to access the joint memory
26 // we can use the joint_allocator
27 // it works best with sequence containers
28 // which do not need to grow/shrink
29
30 // for example, a std::string that uses the joint memory for the allocation
31 memory::string<memory::joint_allocator> str;
32 // again: just an alias for std::basic_string<char, std::char_traits<char>,
33 // memory::std_allocator<char, memory::joint_allocator>>
34
35 // the joint_allocator has a slight overhead
36 // for situations where you just need a dynamic, but fixed-sized array, use:
37 memory::joint_array<int> array;
38 // this is similar to std::vector<int> but cannot grow
39 // it is more efficient than using memory::vector<int, memory::joint_allocator>
40
41 // all constructors must take memory::joint as first parameter
42 // as you cannot create a memory::joint yourself, you cannot create the type by accident
43 // it also contains important metadata (like the allocation size)
44 // and must be passed to the base class
45 // you must pass *this as allocator to members where needed
46 // (i.e. the object with the joint memory)
47 my_type(memory::joint tag, const char* name)
48 : memory::joint_type<my_type>(tag), // pass metadata
49 str(name, memory::joint_allocator(*this)), // create string
50 array({1, 2, 3, 4, 5}, *this) // create array
51 {
52 }
53
54 // default copy/move constructors are deleted
55 // you have to define your own with memory::joint as the first parameter
56 // IMPORTANT: when you have STL containers as member,
57 // you must use the copy/move constructors with a special allocator
58 // you again have to pass *this as allocator,
59 // so that they use the current object for memory,
60 // not the other one
61 // if you forget it on a copy constructor, your code won't compile
62 // but if you forget it on a move constructor, this can't be detected!
63 // note: joint_array will always fail to compile if the allocator is missing
64 my_type(memory::joint tag, const my_type& other)
65 : memory::joint_type<my_type>(tag), // again: pass metadata
66 // note: str(other.str, *this) should work as well,
67 // but older GCC don't support it
68 str(other.str.c_str(), memory::joint_allocator(*this)), // important: pass *this as allocator
69 array(other.array, *this) // ditto
70 {
71 }
72 };
73
74 int main()
75 {
76 // in order to create an object with joint memory,
77 // you must use the joint_ptr
78 // it is similar to std::unique_ptr,
79 // but it also manages the additional object
80
81 // to create one, use allocate_joint or the constructor
82 // you have to pass the allocator used for the memory allocation,
83 // the size of the additional shared memory
84 // followed by constructor arguments for the type
85 auto ptr = memory::allocate_joint<my_type>(memory::default_allocator{},
86 // be careful with your size calculations
87 // and keep alignment buffers in mind
88 // if your size is too small,
89 // it will throw an exception
90 memory::joint_size(20 * sizeof(char)
91 + 10 * sizeof(int) + 10),
92 "joint allocations!");
93 // ptr has the type: memory::joint_ptr<my_type, memory::default_allocator>
94 // it points to memory that is big enough for the type
95 // followed by the specified number of bytes for the shared memory
96 // when ptr goes out of scope, it will destroy the object and deallocate memory
97 // note that it is just a single allocation for the entire memory used,
98 // instead of the three it would otherwise have been (object, string, array)
99
100 // ptr behaves like a pointer to my_type
101 // the joint memory details are hidden away
102 std::cout << ptr->str << '\n';
103 for (auto i : ptr->array)
104 std::cout << i << ' ';
105 std::cout << '\n';
106
107 // if your type provides the joint copy constructor,
108 // you can also clone it
109 // this will only allocate as much memory as the original object actually uses
110 // so you can, for example, use temporary_allocator with a large joint size
111 // to create the object initially,
112 // then clone it to get a buffer that fits
113 // and destroy the original one
114 auto ptr2 = memory::clone_joint(memory::default_allocator{}, *ptr);
115 std::cout << ptr2->str << '\n';
116 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 // this example provides two implementations of a deep_copy_ptr that performs a deep copy when the pointer is copied
5 // the first version takes an Allocator, the second a RawAllocator
6 // see http://foonathan.github.io/doc/memory/md_doc_internal_usage.html for a step-by-step walkthrough
7 // I know the class is pretty dumb and not that well designed (copy performs a deep copy, move invalidates), but that's not the point
8
9 #include <cassert>
10 #include <memory>
11 #include <iostream>
12
13 #include <foonathan/memory/allocator_storage.hpp> // allocator_reference
14 #include <foonathan/memory/default_allocator.hpp> // default_allocator
15
16 // alias namespace foonathan::memory as memory for easier access
17 #include <foonathan/memory/namespace_alias.hpp>
18
19 namespace using_std_allocator
20 {
21 template <typename T, class Allocator = std::allocator<T>>
22 class deep_copy_ptr : Allocator
23 {
24 using traits = std::allocator_traits<Allocator>;
25
26 public:
27 using value_type = typename traits::value_type;
28 using allocator_type = Allocator;
29
30 explicit deep_copy_ptr(const allocator_type& alloc = allocator_type{})
31 : allocator_type(alloc), ptr_(nullptr)
32 {
33 }
34
35 deep_copy_ptr(value_type value, const allocator_type& alloc = allocator_type{})
36 : allocator_type(alloc), ptr_(create(*this, std::move(value)))
37 {
38 }
39
40 deep_copy_ptr(const deep_copy_ptr& other)
41 : allocator_type(traits::select_on_container_copy_construction(other)),
42 ptr_(create(*this, *other))
43 {
44 }
45
46 deep_copy_ptr(deep_copy_ptr&& other) noexcept : allocator_type(std::move(other)),
47 ptr_(other.ptr_)
48 {
49 other.ptr_ = nullptr;
50 }
51
52 ~deep_copy_ptr() noexcept
53 {
54 destroy();
55 }
56
57 deep_copy_ptr& operator=(const deep_copy_ptr& other)
58 {
59 if (traits::propagate_on_container_copy_assignment::value
60 && static_cast<Allocator&>(*this) != other)
61 {
62 allocator_type alloc(other);
63 auto ptr = create(alloc, *other);
64 destroy();
65
66 Allocator::operator=(std::move(alloc));
67 ptr_ = ptr;
68 }
69 else
70 {
71 auto ptr = create(*this, *other);
72 destroy();
73 ptr_ = ptr;
74 }
75 return *this;
76 }
77
78 deep_copy_ptr& operator=(deep_copy_ptr&& other) noexcept(
79 traits::propagate_on_container_move_assignment::value)
80 {
81 if (traits::propagate_on_container_move_assignment::value
82 && static_cast<allocator_type&>(*this) != other)
83 {
84 allocator_type::operator=(std::move(other));
85 ptr_ = other.ptr_;
86 other.ptr_ = nullptr;
87 }
88 else if (static_cast<allocator_type&>(*this) == other)
89 {
90 ptr_ = other.ptr_;
91 other.ptr_ = nullptr;
92 }
93 else
94 {
95 auto ptr = create(*this, std::move(*other));
96 destroy();
97 ptr_ = ptr;
98 }
99 return *this;
100 }
101
102 friend void swap(deep_copy_ptr& a, deep_copy_ptr& b) noexcept
103 {
104 using std::swap;
105 if (traits::propagate_on_container_swap::value)
106 swap(static_cast<allocator_type&>(a), static_cast<allocator_type&>(b));
107 else
108 assert(static_cast<allocator_type&>(a) == b);
109 swap(a.ptr_, b.ptr_);
110 }
111
112 explicit operator bool() const
113 {
114 return !!ptr_;
115 }
116
117 T& operator*()
118 {
119 return *ptr_;
120 }
121
122 const T& operator*() const
123 {
124 return *ptr_;
125 }
126
127 typename traits::pointer operator->()
128 {
129 return ptr_;
130 }
131
132 typename traits::const_pointer operator->() const
133 {
134 return ptr_;
135 }
136
137 private:
138 template <typename... Args>
139 typename traits::pointer create(allocator_type& alloc, Args&&... args)
140 {
141 auto ptr = traits::allocate(alloc, 1);
142 try
143 {
144 traits::construct(alloc, ptr, std::forward<Args>(args)...);
145 }
146 catch (...)
147 {
148 traits::deallocate(alloc, ptr, 1);
149 throw;
150 }
151 return ptr;
152 }
153
154 void destroy() noexcept
155 {
156 if (ptr_)
157 {
158 traits::destroy(*this, ptr_);
159 traits::deallocate(*this, ptr_, 1);
160 }
161 }
162
163 typename traits::pointer ptr_;
164 };
165 }
166
167 namespace using_raw_allocator
168 {
169 template <typename T,
170 class RawAllocator =
171 memory::default_allocator> // default allocator type, usually heap_allocator
172 class deep_copy_ptr
173 : memory::
174 allocator_reference<RawAllocator> // store the allocator by reference to allow sharing and copying
175 // for stateless allocators like default_allocator, it does not store an actual reference
176 // and can take a temporary; for stateful ones, the allocator must outlive the reference
177 {
178 using allocator_ref = memory::allocator_reference<RawAllocator>;
179
180 public:
181 using value_type = T;
182 using allocator_type = typename allocator_ref::allocator_type;
183
184 explicit deep_copy_ptr(allocator_ref alloc = allocator_type{})
185 : allocator_ref(alloc), ptr_(nullptr)
186 {
187 }
188
189 deep_copy_ptr(value_type value, allocator_ref alloc = allocator_type{})
190 : allocator_ref(alloc), ptr_(create(std::move(value)))
191 {
192 }
193
194 deep_copy_ptr(const deep_copy_ptr& other) : allocator_ref(other), ptr_(create(*other))
195 {
196 }
197
198 deep_copy_ptr(deep_copy_ptr&& other) noexcept : allocator_ref(std::move(other)),
199 ptr_(other.ptr_)
200 {
201 other.ptr_ = nullptr;
202 }
203
204 ~deep_copy_ptr() noexcept
205 {
206 destroy();
207 }
208
209 // assignment uses the straightforward copy/move-and-swap idiom, instead of the boilerplate required by Allocator
210 deep_copy_ptr& operator=(const deep_copy_ptr& other)
211 {
212 deep_copy_ptr tmp(other);
213 swap(*this, tmp);
214 return *this;
215 }
216
217 deep_copy_ptr& operator=(deep_copy_ptr&& other) noexcept
218 {
219 deep_copy_ptr tmp(std::move(other));
220 swap(*this, tmp);
221 return *this;
222 }
223
224 // swap is straightforward too
225 friend void swap(deep_copy_ptr& a, deep_copy_ptr& b) noexcept
226 {
227 using std::swap;
228 swap(static_cast<allocator_ref&>(a), static_cast<allocator_ref&>(b));
229 swap(a.ptr_, b.ptr_);
230 }
231
232 explicit operator bool() const
233 {
234 return !!ptr_;
235 }
236
237 T& operator*()
238 {
239 return *ptr_;
240 }
241
242 const T& operator*() const
243 {
244 return *ptr_;
245 }
246
247 T* operator->()
248 {
249 return ptr_;
250 }
251
252 const T* operator->() const
253 {
254 return ptr_;
255 }
256
257 private:
258 template <typename... Args>
259 T* create(Args&&... args)
260 {
261 auto storage =
262 this->allocate_node(sizeof(T), alignof(T)); // first allocate storage for the node
263 try
264 {
265 ::new (storage) T(std::forward<Args>(args)...); // then call constructor
266 }
267 catch (...)
268 {
269 this->deallocate_node(storage, sizeof(T),
270 alignof(T)); // if failure, deallocate storage again
271 throw;
272 }
273 return static_cast<T*>(storage);
274 }
275
276 void destroy() noexcept
277 {
278 if (ptr_)
279 {
280 ptr_->~T(); // call destructor
281 this->deallocate_node(ptr_, sizeof(T), alignof(T)); // deallocate storage
282 }
283 }
284
285 T* ptr_;
286 };
287 }
288
289 // simple usage functions
290
291 template <class Ptr>
292 void use_ptr()
293 {
294 Ptr a(4), b(3);
295 std::cout << *a << ' ' << *b << '\n';
296 swap(a, b);
297
298 auto c = a;
299 std::cout << *a << ' ' << *c << '\n';
300
301 auto d = std::move(b);
302 std::cout << std::boolalpha << *d << ' ' << !!b << '\n';
303 }
304
305 int main()
306 {
307 std::cout << "Allocator\n\n";
308 use_ptr<using_std_allocator::deep_copy_ptr<int>>();
309 std::cout << "\n\nRawAllocator\n\n";
310 use_ptr<using_raw_allocator::deep_copy_ptr<int>>();
311 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 // this example shows how to track allocations
5 // see https://foonathan.net/memory/md_doc_adapters_storage.html for further details
6
7 #include <iostream>
8
9 #include <foonathan/memory/container.hpp> // set, set_node_size
10 #include <foonathan/memory/memory_pool.hpp> // memory_pool
11 #include <foonathan/memory/tracking.hpp> // make_tracked_allocator
12
13 // alias namespace foonathan::memory as memory for easier access
14 #include <foonathan/memory/namespace_alias.hpp>
15
16 int main()
17 {
18 using namespace memory::literals;
19
20 // tracker class that logs internal behavior of the allocator
21 struct tracker
22 {
23 void on_node_allocation(void* mem, std::size_t size, std::size_t) noexcept
24 {
25 std::clog << this << " node allocated: ";
26 std::clog << mem << " (" << size << ") " << '\n';
27 }
28
29 void on_array_allocation(void* mem, std::size_t count, std::size_t size,
30 std::size_t) noexcept
31 {
32 std::clog << this << " array allocated: ";
33 std::clog << mem << " (" << count << " * " << size << ") " << '\n';
34 }
35
36 void on_node_deallocation(void* ptr, std::size_t, std::size_t) noexcept
37 {
38 std::clog << this << " node deallocated: " << ptr << " \n";
39 }
40
41 void on_array_deallocation(void* ptr, std::size_t, std::size_t, std::size_t) noexcept
42 {
43 std::clog << this << " array deallocated: " << ptr << " \n";
44 }
45 };
46
47 {
48 // create a tracked default allocator
49 auto tracked_allocator =
50 memory::make_tracked_allocator(tracker{}, memory::default_allocator{});
51
52 // use the allocator as usual
53 // decltype(tracked_allocator) can be used below, too
54 memory::vector<int, memory::tracked_allocator<tracker, memory::default_allocator>>
55 vec({1, 2, 3, 4}, tracked_allocator);
56
57 std::clog << "vec: ";
58 for (auto i : vec)
59 std::clog << i << ' ';
60 std::clog << '\n';
61 }
62
63 {
64 // create a tracked memory_pool to see what kind of allocations are made
65 auto tracked_pool =
66 memory::make_tracked_allocator(tracker{},
67 memory::memory_pool<>(memory::set_node_size<int>::value,
68 4_KiB));
69
70 // use the allocator as usual
71 // decltype(tracked_pool) can be used below, too
72 memory::set<int, memory::tracked_allocator<tracker, memory::memory_pool<>>>
73 set(std::less<int>(), tracked_pool);
74
75 set.insert(1);
76 set.insert(2);
77 set.insert(3);
78 set.insert(1);
79
80 std::clog << "set: ";
81 for (auto i : set)
82 std::clog << i << ' ';
83 std::clog << '\n';
84
85 set.erase(2);
86 }
87 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 // this example shows the basic usage of RawAllocator classes with containers and smart pointers
5 // see http://foonathan.github.io/doc/memory/md_doc_external_usage.html for more details
6
7 #include <algorithm>
8 #include <iostream>
9 #include <iterator>
10
11 #include <foonathan/memory/container.hpp> // vector, list, list_node_size,...
12 #include <foonathan/memory/memory_pool.hpp> // memory_pool
13 #include <foonathan/memory/smart_ptr.hpp> // allocate_unique
14 #include <foonathan/memory/static_allocator.hpp> // static_allocator_storage, static_block_allocator
15 #include <foonathan/memory/temporary_allocator.hpp> // temporary_allocator
16
17 // alias namespace foonathan::memory as memory for easier access
18 #include <foonathan/memory/namespace_alias.hpp>
19
20 template <typename BiIter>
21 void merge_sort(BiIter begin, BiIter end);
22
23 int main()
24 {
25 using namespace memory::literals;
26
27 // a memory pool RawAllocator
28 // allocates a memory block - initially 4KiB - and splits it into chunks of size list_node_size<int>::value
29 // list_node_size<int>::value is the size of each node of a std::list
30 memory::memory_pool<> pool(memory::list_node_size<int>::value, 4_KiB);
31
32 // just an alias for std::list<int, memory::std_allocator<int, memory::memory_pool<>>>
33 // a std::list using a memory_pool
34 // std_allocator stores a reference to a RawAllocator and provides the Allocator interface
35 memory::list<int, memory::memory_pool<>> list(pool);
36 list.push_back(3);
37 list.push_back(2);
38 list.push_back(1);
39
40 for (auto e : list)
41 std::cout << e << ' ';
42 std::cout << '\n';
43
44 merge_sort(list.begin(), list.end());
45
46 for (auto e : list)
47 std::cout << e << ' ';
48 std::cout << '\n';
49
50 // allocate a std::unique_ptr using the pool
51 // memory::allocate_shared is also available
52 memory::unique_ptr<int, memory::memory_pool<>> ptr =
53 memory::allocate_unique<int>(pool, *list.begin());
54 std::cout << *ptr << '\n';
55
56 struct base
57 {
58 virtual ~base() = default;
59
60 virtual const char* name() const = 0;
61 };
62
63 struct derived : base
64 {
65 const char* name() const override
66 {
67 return "derived";
68 }
69 };
70
71 // instead of using memory::unique_ptr<base, ...>, you have to use memory::unique_base_ptr<base, ...>,
72 // because the deleter has to remember the size of the derived type
73 memory::unique_base_ptr<base, memory::memory_pool<>> base_ptr =
74 memory::allocate_unique<derived>(pool);
75 std::cout << base_ptr->name() << '\n';
76
77 // static storage of size 4KiB
78 memory::static_allocator_storage<4_KiB> storage;
79
80 // a memory pool again but this time with a BlockAllocator
81 // this controls the internal allocations of the pool itself
82 // we need to specify the first template parameter giving the type of the pool as well
83 // (node_pool is the default)
84 // we use a static_block_allocator that uses the static storage above
85 // all allocations will use a memory block on the stack
86 using static_pool_t = memory::memory_pool<memory::node_pool, memory::static_block_allocator>;
87 static_pool_t static_pool(memory::unordered_set_node_size<int>::value, 4_KiB, storage);
88
89 // again, just an alias for std::unordered_set<int, std::hash<int>, std::equal_to<int>, memory::std_allocator<int, static_pool_t>>
90 // see why I wrote these?
91 // now we have a hash set that lives on the stack!
92 memory::unordered_set<int, static_pool_t>
93 set(13, std::hash<int>{}, std::equal_to<int>{},
94 static_pool); // (GCC 4.7 is missing the allocator-only ctor, breaks travis)
95
96 set.insert(3);
97 set.insert(2);
98 set.insert(3); // running out of stack memory is properly handled, of course
99
100 for (auto e : set)
101 std::cout << e << ' ';
102 std::cout << '\n';
103 }
104
105 // naive implementation of merge_sort using temporary memory allocator
106 template <typename BiIter>
107 void merge_sort(BiIter begin, BiIter end)
108 {
109 using value_type = typename std::iterator_traits<BiIter>::value_type;
110
111 auto distance = std::distance(begin, end);
112 if (distance <= 1)
113 return;
114
115 auto mid = begin;
116 std::advance(mid, distance / 2);
117
118 // an allocator for temporary memory
119 // is similar to alloca() but uses its own stack
120 // this stack is thread_local and created the first time it's needed
121 // as soon as the allocator object goes out of scope everything allocated through it will be freed
122 memory::temporary_allocator alloc;
123
124 // alias for std::vector<value_type, memory::std_allocator<value_type, memory::temporary_allocator>>
125 // a std::vector using a temporary_allocator
126 memory::vector<value_type, memory::temporary_allocator> first(begin, mid, alloc),
127 second(mid, end, alloc);
128
129 merge_sort(first.begin(), first.end());
130 merge_sort(second.begin(), second.end());
131 std::merge(first.begin(), first.end(), second.begin(), second.end(), begin);
132 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_ALIGNED_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_ALIGNED_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::aligned_allocator and related functions.
9
10 #include <type_traits>
11
12 #include "detail/assert.hpp"
13 #include "detail/utility.hpp"
14 #include "allocator_traits.hpp"
15 #include "config.hpp"
16
17 namespace foonathan
18 {
19 namespace memory
20 {
21 /// A \concept{concept_rawallocator,RawAllocator} adapter that ensures a minimum alignment.
22 /// It adjusts the alignment value so that it is never smaller than the minimum and forwards to the specified allocator.
23 /// \ingroup adapter
24 template <class RawAllocator>
25 class aligned_allocator : FOONATHAN_EBO(allocator_traits<RawAllocator>::allocator_type)
26 {
27 using traits = allocator_traits<RawAllocator>;
28 using composable_traits = composable_allocator_traits<RawAllocator>;
29 using composable = is_composable_allocator<typename traits::allocator_type>;
30
31 public:
32 using allocator_type = typename allocator_traits<RawAllocator>::allocator_type;
33 using is_stateful = std::true_type;
34
35 /// \effects Creates it passing it the minimum alignment value and the allocator object.
36 /// \requires \c min_alignment must be less than or equal to \c this->max_alignment().
37 explicit aligned_allocator(std::size_t min_alignment, allocator_type&& alloc = {})
38 : allocator_type(detail::move(alloc)), min_alignment_(min_alignment)
39 {
40 FOONATHAN_MEMORY_ASSERT(min_alignment_ <= max_alignment());
41 }
42
43 /// @{
44 /// \effects Moves the \c aligned_allocator object.
45 /// It simply moves the underlying allocator.
46 aligned_allocator(aligned_allocator&& other) noexcept
47 : allocator_type(detail::move(other)), min_alignment_(other.min_alignment_)
48 {
49 }
50
51 aligned_allocator& operator=(aligned_allocator&& other) noexcept
52 {
53 allocator_type::operator=(detail::move(other));
54 min_alignment_ = other.min_alignment_;
55 return *this;
56 }
57 /// @}
58
59 /// @{
60 /// \effects Forwards to the underlying allocator through the \ref allocator_traits.
61 /// If the \c alignment is less than the \c min_alignment(), it is set to the minimum alignment.
62 void* allocate_node(std::size_t size, std::size_t alignment)
63 {
64 if (min_alignment_ > alignment)
65 alignment = min_alignment_;
66 return traits::allocate_node(get_allocator(), size, alignment);
67 }
68
69 void* allocate_array(std::size_t count, std::size_t size, std::size_t alignment)
70 {
71 if (min_alignment_ > alignment)
72 alignment = min_alignment_;
73 return traits::allocate_array(get_allocator(), count, size, alignment);
74 }
75
76 void deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
77 {
78 if (min_alignment_ > alignment)
79 alignment = min_alignment_;
80 traits::deallocate_node(get_allocator(), ptr, size, alignment);
81 }
82
83 void deallocate_array(void* ptr, std::size_t count, std::size_t size,
84 std::size_t alignment) noexcept
85 {
86 if (min_alignment_ > alignment)
87 alignment = min_alignment_;
88 traits::deallocate_array(get_allocator(), ptr, count, size, alignment);
89 }
90 /// @}
91
92 /// @{
93 /// \effects Forwards to the underlying allocator through the \ref composable_allocator_traits.
94 /// If the \c alignment is less than the \c min_alignment(), it is set to the minimum alignment.
95 /// \requires The underlying allocator must be composable.
96 FOONATHAN_ENABLE_IF(composable::value)
97 void* try_allocate_node(std::size_t size, std::size_t alignment) noexcept
98 {
99 if (min_alignment_ > alignment)
100 alignment = min_alignment_;
101 return composable_traits::try_allocate_node(get_allocator(), size, alignment);
102 }
103
104 FOONATHAN_ENABLE_IF(composable::value)
105 void* try_allocate_array(std::size_t count, std::size_t size,
106 std::size_t alignment) noexcept
107 {
108 if (min_alignment_ > alignment)
109 alignment = min_alignment_;
110 return composable_traits::try_allocate_array(get_allocator(), count, size,
111 alignment);
112 }
113
114 FOONATHAN_ENABLE_IF(composable::value)
115 bool try_deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
116 {
117 if (min_alignment_ > alignment)
118 alignment = min_alignment_;
119 return composable_traits::try_deallocate_node(get_allocator(), ptr, size,
120 alignment);
121 }
122
123 FOONATHAN_ENABLE_IF(composable::value)
124 bool try_deallocate_array(void* ptr, std::size_t count, std::size_t size,
125 std::size_t alignment) noexcept
126 {
127 if (min_alignment_ > alignment)
128 alignment = min_alignment_;
129 return composable_traits::try_deallocate_array(get_allocator(), ptr, count, size,
130 alignment);
131 }
132 /// @}
133
134 /// @{
135 /// \returns The value returned by the \ref allocator_traits for the underlying allocator.
136 std::size_t max_node_size() const
137 {
138 return traits::max_node_size(get_allocator());
139 }
140
141 std::size_t max_array_size() const
142 {
143 return traits::max_array_size(get_allocator());
144 }
145
146 std::size_t max_alignment() const
147 {
148 return traits::max_alignment(get_allocator());
149 }
150 /// @}
151
152 /// @{
153 /// \returns A reference to the underlying allocator.
154 allocator_type& get_allocator() noexcept
155 {
156 return *this;
157 }
158
159 const allocator_type& get_allocator() const noexcept
160 {
161 return *this;
162 }
163 /// @}
164
165 /// \returns The minimum alignment.
166 std::size_t min_alignment() const noexcept
167 {
168 return min_alignment_;
169 }
170
171 /// \effects Sets the minimum alignment to a new value.
172 /// \requires \c min_alignment must be less than or equal to \c this->max_alignment().
173 void set_min_alignment(std::size_t min_alignment)
174 {
175 FOONATHAN_MEMORY_ASSERT(min_alignment <= max_alignment());
176 min_alignment_ = min_alignment;
177 }
178
179 private:
180 std::size_t min_alignment_;
181 };
182
183 /// \returns A new \ref aligned_allocator created by forwarding the parameters to the constructor.
184 /// \relates aligned_allocator
185 template <class RawAllocator>
186 auto make_aligned_allocator(std::size_t min_alignment, RawAllocator&& allocator) noexcept
187 -> aligned_allocator<typename std::decay<RawAllocator>::type>
188 {
189 return aligned_allocator<
190 typename std::decay<RawAllocator>::type>{min_alignment,
191 detail::forward<RawAllocator>(allocator)};
192 }
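// A minimal usage sketch (illustration only; assumes heap_allocator.hpp is
// included for memory::heap_allocator and that 16 does not exceed the
// allocator's max_alignment()):
//
//   auto alloc = memory::make_aligned_allocator(16u, memory::heap_allocator{});
//   void* node = alloc.allocate_node(sizeof(int), alignof(int)); // alignment raised to 16
//   alloc.deallocate_node(node, sizeof(int), alignof(int));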
193 } // namespace memory
194 } // namespace foonathan
195
196 #endif // FOONATHAN_MEMORY_ALIGNED_ALLOCATOR_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_ALLOCATOR_STORAGE_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_ALLOCATOR_STORAGE_HPP_INCLUDED
6
7 /// \file
8 /// Class template \ref foonathan::memory::allocator_storage, some policies and resulting typedefs.
9
10 #include <new>
11 #include <type_traits>
12
13 #include "detail/utility.hpp"
14 #include "config.hpp"
15 #include "allocator_traits.hpp"
16 #include "threading.hpp"
17
18 namespace foonathan
19 {
20 namespace memory
21 {
22 namespace detail
23 {
24 template <class Alloc>
25 void* try_allocate_node(std::true_type, Alloc& alloc, std::size_t size,
26 std::size_t alignment) noexcept
27 {
28 return composable_allocator_traits<Alloc>::try_allocate_node(alloc, size,
29 alignment);
30 }
31
32 template <class Alloc>
33 void* try_allocate_array(std::true_type, Alloc& alloc, std::size_t count,
34 std::size_t size, std::size_t alignment) noexcept
35 {
36 return composable_allocator_traits<Alloc>::try_allocate_array(alloc, count, size,
37 alignment);
38 }
39
40 template <class Alloc>
41 bool try_deallocate_node(std::true_type, Alloc& alloc, void* ptr, std::size_t size,
42 std::size_t alignment) noexcept
43 {
44 return composable_allocator_traits<Alloc>::try_deallocate_node(alloc, ptr, size,
45 alignment);
46 }
47
48 template <class Alloc>
49 bool try_deallocate_array(std::true_type, Alloc& alloc, void* ptr, std::size_t count,
50 std::size_t size, std::size_t alignment) noexcept
51 {
52 return composable_allocator_traits<Alloc>::try_deallocate_array(alloc, ptr, count,
53 size, alignment);
54 }
55
56 template <class Alloc>
57 void* try_allocate_node(std::false_type, Alloc&, std::size_t, std::size_t) noexcept
58 {
59 FOONATHAN_MEMORY_UNREACHABLE("Allocator is not composable");
60 return nullptr;
61 }
62
63 template <class Alloc>
64 void* try_allocate_array(std::false_type, Alloc&, std::size_t, std::size_t,
65 std::size_t) noexcept
66 {
67 FOONATHAN_MEMORY_UNREACHABLE("Allocator is not composable");
68 return nullptr;
69 }
70
71 template <class Alloc>
72 bool try_deallocate_node(std::false_type, Alloc&, void*, std::size_t,
73 std::size_t) noexcept
74 {
75 FOONATHAN_MEMORY_UNREACHABLE("Allocator is not composable");
76 return false;
77 }
78
79 template <class Alloc>
80 bool try_deallocate_array(std::false_type, Alloc&, void*, std::size_t, std::size_t,
81 std::size_t) noexcept
82 {
83 FOONATHAN_MEMORY_UNREACHABLE("Allocator is not composable");
84 return false;
85 }
86 } // namespace detail
87
88 /// A \concept{concept_rawallocator,RawAllocator} that stores another allocator.
89 /// The \concept{concept_storagepolicy,StoragePolicy} defines the allocator type being stored and how it is stored.
90 /// The \c Mutex controls synchronization of the access.
91 /// \ingroup storage
92 template <class StoragePolicy, class Mutex>
93 class allocator_storage
94 : FOONATHAN_EBO(StoragePolicy,
95 detail::mutex_storage<
96 detail::mutex_for<typename StoragePolicy::allocator_type, Mutex>>)
97 {
98 using traits = allocator_traits<typename StoragePolicy::allocator_type>;
99 using composable_traits =
100 composable_allocator_traits<typename StoragePolicy::allocator_type>;
101 using composable = is_composable_allocator<typename StoragePolicy::allocator_type>;
102 using actual_mutex = const detail::mutex_storage<
103 detail::mutex_for<typename StoragePolicy::allocator_type, Mutex>>;
104
105 public:
106 using allocator_type = typename StoragePolicy::allocator_type;
107 using storage_policy = StoragePolicy;
108 using mutex = Mutex;
109 using is_stateful = typename traits::is_stateful;
110
111 /// \effects Creates it by default-constructing the \c StoragePolicy.
112 /// \requires The \c StoragePolicy must be default-constructible.
113 /// \notes The default constructor may create an invalid allocator storage not associated with any allocator.
114 /// If that is the case, it must not be used.
115 allocator_storage() = default;
116
117 /// \effects Creates it by passing it an allocator.
118 /// The allocator will be forwarded to the \c StoragePolicy, which decides whether it will be moved, its address stored, or something else.
119 /// \requires The expression <tt>new storage_policy(std::forward<Alloc>(alloc))</tt> must be well-formed,
120 /// otherwise this constructor does not participate in overload resolution.
121 template <
122 class Alloc,
123 // MSVC seems to ignore access rights in SFINAE below
124 // use this to prevent this constructor being chosen instead of move for types inheriting from it
125 FOONATHAN_REQUIRES(
126 (!std::is_base_of<allocator_storage, typename std::decay<Alloc>::type>::value))>
127 allocator_storage(Alloc&& alloc,
128 FOONATHAN_SFINAE(new storage_policy(detail::forward<Alloc>(alloc))))
129 : storage_policy(detail::forward<Alloc>(alloc))
130 {
131 }
132
133 /// \effects Creates it by passing it another \c allocator_storage with a different \c StoragePolicy but the same \c Mutex type.
134 /// Initializes it with the result of \c other.get_allocator().
135 /// \requires The expression <tt>new storage_policy(other.get_allocator())</tt> must be well-formed,
136 /// otherwise this constructor does not participate in overload resolution.
137 template <class OtherPolicy>
138 allocator_storage(const allocator_storage<OtherPolicy, Mutex>& other,
139 FOONATHAN_SFINAE(new storage_policy(other.get_allocator())))
140 : storage_policy(other.get_allocator())
141 {
142 }
143
144 /// @{
145 /// \effects Moves the \c allocator_storage object.
146 /// A moved-out \c allocator_storage object must still store a valid allocator object.
147 allocator_storage(allocator_storage&& other) noexcept
148 : storage_policy(detail::move(other)),
149 detail::mutex_storage<
150 detail::mutex_for<typename StoragePolicy::allocator_type, Mutex>>(
151 detail::move(other))
152 {
153 }
154
155 allocator_storage& operator=(allocator_storage&& other) noexcept
156 {
157 storage_policy::operator=(detail::move(other));
158 detail::mutex_storage<detail::mutex_for<typename StoragePolicy::allocator_type,
159 Mutex>>::operator=(detail::move(other));
160 return *this;
161 }
162 /// @}
163
164 /// @{
165 /// \effects Copies the \c allocator_storage object.
166 /// \requires The \c StoragePolicy must be copyable.
167 allocator_storage(const allocator_storage&) = default;
168 allocator_storage& operator=(const allocator_storage&) = default;
169 /// @}
170
171 /// @{
172 /// \effects Calls the function on the stored allocator.
173 /// The \c Mutex will be locked during the operation.
174 void* allocate_node(std::size_t size, std::size_t alignment)
175 {
176 std::lock_guard<actual_mutex> lock(*this);
177 auto&& alloc = get_allocator();
178 return traits::allocate_node(alloc, size, alignment);
179 }
180
181 void* allocate_array(std::size_t count, std::size_t size, std::size_t alignment)
182 {
183 std::lock_guard<actual_mutex> lock(*this);
184 auto&& alloc = get_allocator();
185 return traits::allocate_array(alloc, count, size, alignment);
186 }
187
188 void deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
189 {
190 std::lock_guard<actual_mutex> lock(*this);
191 auto&& alloc = get_allocator();
192 traits::deallocate_node(alloc, ptr, size, alignment);
193 }
194
195 void deallocate_array(void* ptr, std::size_t count, std::size_t size,
196 std::size_t alignment) noexcept
197 {
198 std::lock_guard<actual_mutex> lock(*this);
199 auto&& alloc = get_allocator();
200 traits::deallocate_array(alloc, ptr, count, size, alignment);
201 }
202
203 std::size_t max_node_size() const
204 {
205 std::lock_guard<actual_mutex> lock(*this);
206 auto&& alloc = get_allocator();
207 return traits::max_node_size(alloc);
208 }
209
210 std::size_t max_array_size() const
211 {
212 std::lock_guard<actual_mutex> lock(*this);
213 auto&& alloc = get_allocator();
214 return traits::max_array_size(alloc);
215 }
216
217 std::size_t max_alignment() const
218 {
219 std::lock_guard<actual_mutex> lock(*this);
220 auto&& alloc = get_allocator();
221 return traits::max_alignment(alloc);
222 }
223 /// @}
224
225 /// @{
226 /// \effects Calls the function on the stored composable allocator.
227 /// The \c Mutex will be locked during the operation.
228 /// \requires The allocator must be composable,
229 /// i.e. \ref is_composable() must return `true`.
230 /// \note This check is done at compile-time where possible,
231 /// and at runtime in the case of type-erased storage.
232 FOONATHAN_ENABLE_IF(composable::value)
233 void* try_allocate_node(std::size_t size, std::size_t alignment) noexcept
234 {
235 FOONATHAN_MEMORY_ASSERT(is_composable());
236 std::lock_guard<actual_mutex> lock(*this);
237 auto&& alloc = get_allocator();
238 return composable_traits::try_allocate_node(alloc, size, alignment);
239 }
240
241 FOONATHAN_ENABLE_IF(composable::value)
242 void* try_allocate_array(std::size_t count, std::size_t size,
243 std::size_t alignment) noexcept
244 {
245 FOONATHAN_MEMORY_ASSERT(is_composable());
246 std::lock_guard<actual_mutex> lock(*this);
247 auto&& alloc = get_allocator();
248 return composable_traits::try_allocate_array(alloc, count, size, alignment);
249 }
250
251 FOONATHAN_ENABLE_IF(composable::value)
252 bool try_deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
253 {
254 FOONATHAN_MEMORY_ASSERT(is_composable());
255 std::lock_guard<actual_mutex> lock(*this);
256 auto&& alloc = get_allocator();
257 return composable_traits::try_deallocate_node(alloc, ptr, size, alignment);
258 }
259
260 FOONATHAN_ENABLE_IF(composable::value)
261 bool try_deallocate_array(void* ptr, std::size_t count, std::size_t size,
262 std::size_t alignment) noexcept
263 {
264 FOONATHAN_MEMORY_ASSERT(is_composable());
265 std::lock_guard<actual_mutex> lock(*this);
266 auto&& alloc = get_allocator();
267 return composable_traits::try_deallocate_array(alloc, ptr, count, size, alignment);
268 }
269 /// @}
270
271 /// @{
272 /// \effects Forwards to the \c StoragePolicy.
273 /// \returns Returns a reference to the stored allocator.
274 /// \note This does not lock the \c Mutex.
275 auto get_allocator() noexcept
276 -> decltype(std::declval<storage_policy>().get_allocator())
277 {
278 return storage_policy::get_allocator();
279 }
280
281 auto get_allocator() const noexcept
282 -> decltype(std::declval<const storage_policy>().get_allocator())
283 {
284 return storage_policy::get_allocator();
285 }
286 /// @}
287
288 /// @{
289 /// \returns A proxy object that acts like a pointer to the stored allocator.
290 /// It cannot be reassigned to point to another allocator object and only moving is supported, which is destructive.
291 /// As long as the proxy object lives and is not moved from, the \c Mutex will be kept locked.
292 auto lock() noexcept -> FOONATHAN_IMPL_DEFINED(decltype(detail::lock_allocator(
293 std::declval<storage_policy>().get_allocator(), std::declval<actual_mutex&>())))
294 {
295 return detail::lock_allocator(get_allocator(), static_cast<actual_mutex&>(*this));
296 }
297
298 auto lock() const noexcept -> FOONATHAN_IMPL_DEFINED(decltype(
299 detail::lock_allocator(std::declval<const storage_policy>().get_allocator(),
300 std::declval<actual_mutex&>())))
301 {
302 return detail::lock_allocator(get_allocator(), static_cast<actual_mutex&>(*this));
303 }
304 /// @}
305
306 /// \returns Whether or not the stored allocator is composable,
307 /// that is, whether you can use the composition functions.
308 /// \note Due to type-erased allocators,
309 /// this function cannot be `constexpr`.
310 bool is_composable() const noexcept
311 {
312 return StoragePolicy::is_composable();
313 }
314 };
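// A minimal sketch of using allocator_storage directly; normally one of the
// aliases defined below (allocator_adapter, allocator_reference, ...) is used.
// Assumes heap_allocator.hpp is included for memory::heap_allocator:
//
//   memory::allocator_storage<memory::direct_storage<memory::heap_allocator>,
//                             memory::no_mutex>
//       storage(memory::heap_allocator{});
//   void* node = storage.allocate_node(sizeof(int), alignof(int));
//   storage.deallocate_node(node, sizeof(int), alignof(int));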
315
316 /// Tag type that enables type-erasure in \ref reference_storage.
317 /// It can be used everywhere a \ref allocator_reference is used internally.
318 /// \ingroup storage
319 struct any_allocator
320 {
321 };
322
323 /// A \concept{concept_storagepolicy,StoragePolicy} that stores the allocator directly.
324 /// It embeds the allocator inside it, i.e. moving the storage policy will move the allocator.
325 /// \ingroup storage
326 template <class RawAllocator>
327 class direct_storage : FOONATHAN_EBO(allocator_traits<RawAllocator>::allocator_type)
328 {
329 static_assert(!std::is_same<RawAllocator, any_allocator>::value,
330 "cannot type-erase in direct_storage");
331
332 public:
333 using allocator_type = typename allocator_traits<RawAllocator>::allocator_type;
334
335 /// \effects Creates it by default-constructing the allocator.
336 /// \requires The \c RawAllocator must be default-constructible.
337 direct_storage() = default;
338
339 /// \effects Creates it by moving in an allocator object.
340 direct_storage(allocator_type&& allocator) noexcept
341 : allocator_type(detail::move(allocator))
342 {
343 }
344
345 /// @{
346 /// \effects Moves the \c direct_storage object.
347 /// This will move the stored allocator.
348 direct_storage(direct_storage&& other) noexcept : allocator_type(detail::move(other)) {}
349
350 direct_storage& operator=(direct_storage&& other) noexcept
351 {
352 allocator_type::operator=(detail::move(other));
353 return *this;
354 }
355 /// @}
356
357 /// @{
358 /// \returns A (\c const) reference to the stored allocator.
359 allocator_type& get_allocator() noexcept
360 {
361 return *this;
362 }
363
364 const allocator_type& get_allocator() const noexcept
365 {
366 return *this;
367 }
368 /// @}
369
370 protected:
371 ~direct_storage() noexcept = default;
372
373 bool is_composable() const noexcept
374 {
375 return is_composable_allocator<allocator_type>::value;
376 }
377 };
378
379 /// An alias template for \ref allocator_storage using the \ref direct_storage policy without a mutex.
380 /// It has the effect of giving any \concept{concept_rawallocator,RawAllocator} the interface with all member functions,
381 /// avoiding the need to wrap it inside the \ref allocator_traits.
382 /// \ingroup storage
383 template <class RawAllocator>
384 FOONATHAN_ALIAS_TEMPLATE(allocator_adapter,
385 allocator_storage<direct_storage<RawAllocator>, no_mutex>);
386
387 /// \returns A new \ref allocator_adapter object created by forwarding to the constructor.
388 /// \relates allocator_adapter
389 template <class RawAllocator>
390 auto make_allocator_adapter(RawAllocator&& allocator) noexcept
391 -> allocator_adapter<typename std::decay<RawAllocator>::type>
392 {
393 return {detail::forward<RawAllocator>(allocator)};
394 }
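// e.g., a sketch that gives a raw allocator the full member-function interface
// (memory::heap_allocator is an assumption; it lives in heap_allocator.hpp):
//
//   auto adapter = memory::make_allocator_adapter(memory::heap_allocator{});
//   void* node = adapter.allocate_node(sizeof(int), alignof(int));
//   adapter.deallocate_node(node, sizeof(int), alignof(int));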
395
396 /// An alias template for \ref allocator_storage using the \ref direct_storage policy with a mutex.
397 /// It has a similar effect as \ref allocator_adapter but performs synchronization.
398 /// The \c Mutex will default to \c std::mutex if threading is supported,
399 /// otherwise there is no default.
400 /// \ingroup storage
401 #if FOONATHAN_HOSTED_IMPLEMENTATION
402 template <class RawAllocator, class Mutex = std::mutex>
403 FOONATHAN_ALIAS_TEMPLATE(thread_safe_allocator,
404 allocator_storage<direct_storage<RawAllocator>, Mutex>);
405 #else
406 template <class RawAllocator, class Mutex>
407 FOONATHAN_ALIAS_TEMPLATE(thread_safe_allocator,
408 allocator_storage<direct_storage<RawAllocator>, Mutex>);
409 #endif
410
411 #if FOONATHAN_HOSTED_IMPLEMENTATION
412 /// \returns A new \ref thread_safe_allocator object created by forwarding to the constructor.
413 /// \relates thread_safe_allocator
414 template <class RawAllocator>
415 auto make_thread_safe_allocator(RawAllocator&& allocator)
416 -> thread_safe_allocator<typename std::decay<RawAllocator>::type>
417 {
418 return detail::forward<RawAllocator>(allocator);
419 }
420 #endif
421
422 /// \returns A new \ref thread_safe_allocator object created by forwarding to the constructor,
423 /// specifying a certain mutex type.
424 /// \requires It requires threading support from the implementation.
425 /// \relates thread_safe_allocator
426 template <class Mutex, class RawAllocator>
427 auto make_thread_safe_allocator(RawAllocator&& allocator)
428 -> thread_safe_allocator<typename std::decay<RawAllocator>::type, Mutex>
429 {
430 return detail::forward<RawAllocator>(allocator);
431 }
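// e.g., a sketch of synchronized access (hosted implementation assumed,
// so the mutex defaults to std::mutex; heap_allocator.hpp assumed included):
//
//   auto safe = memory::make_thread_safe_allocator(memory::heap_allocator{});
//   // each allocation/deallocation call below locks the mutex internally
//   void* node = safe.allocate_node(sizeof(int), alignof(int));
//   safe.deallocate_node(node, sizeof(int), alignof(int));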
432
433 namespace detail
434 {
435 struct reference_stateful
436 {
437 };
438 struct reference_stateless
439 {
440 };
441 struct reference_shared
442 {
443 };
444
445 reference_stateful reference_type(std::true_type stateful, std::false_type shared);
446 reference_stateless reference_type(std::false_type stateful, std::true_type shared);
447 reference_stateless reference_type(std::false_type stateful, std::false_type shared);
448 reference_shared reference_type(std::true_type stateful, std::true_type shared);
449
450 template <class RawAllocator, class Tag>
451 class reference_storage_impl;
452
453 // reference to stateful: stores a pointer to an allocator
454 template <class RawAllocator>
455 class reference_storage_impl<RawAllocator, reference_stateful>
456 {
457 protected:
458 reference_storage_impl() noexcept : alloc_(nullptr) {}
459
460 reference_storage_impl(RawAllocator& allocator) noexcept : alloc_(&allocator) {}
461
462 bool is_valid() const noexcept
463 {
464 return alloc_ != nullptr;
465 }
466
467 RawAllocator& get_allocator() const noexcept
468 {
469 FOONATHAN_MEMORY_ASSERT(alloc_ != nullptr);
470 return *alloc_;
471 }
472
473 private:
474 RawAllocator* alloc_;
475 };
476
477 // reference to stateless: store in static storage
478 template <class RawAllocator>
479 class reference_storage_impl<RawAllocator, reference_stateless>
480 {
481 protected:
482 reference_storage_impl() noexcept = default;
483
484 reference_storage_impl(const RawAllocator&) noexcept {}
485
486 bool is_valid() const noexcept
487 {
488 return true;
489 }
490
491 RawAllocator& get_allocator() const noexcept
492 {
493 static RawAllocator alloc;
494 return alloc;
495 }
496 };
497
498 // reference to shared: stores RawAllocator directly
499 template <class RawAllocator>
500 class reference_storage_impl<RawAllocator, reference_shared>
501 {
502 protected:
503 reference_storage_impl() noexcept = default;
504
505 reference_storage_impl(const RawAllocator& alloc) noexcept : alloc_(alloc) {}
506
507 bool is_valid() const noexcept
508 {
509 return true;
510 }
511
512 RawAllocator& get_allocator() const noexcept
513 {
514 return alloc_;
515 }
516
517 private:
518 mutable RawAllocator alloc_;
519 };
520 } // namespace detail
521
522 /// Specifies whether or not a \concept{concept_rawallocator,RawAllocator} has shared semantics.
523 /// It is shared if - like \ref allocator_reference - multiple objects refer to the same internal allocator and it can be copied.
524 /// This sharing is stateful; stateless allocators are not considered shared in the meaning of this trait. <br>
525 /// If a \c RawAllocator is shared, it will be directly embedded inside \ref reference_storage since it already provides \ref allocator_reference-like semantics, so there is no need to add them manually.<br>
526 /// Specialize it for your own types if they provide sharing semantics and can be copied.
527 /// They must also provide an `operator==` to check whether two allocators refer to the same shared one.
528 /// \note This makes no guarantees about the lifetime of the shared object, the sharing allocators can either own or refer to a shared object.
529 /// \ingroup storage
530 template <class RawAllocator>
531 struct is_shared_allocator : std::false_type
532 {
533 };
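// e.g., a sketch of opting a custom allocator into the trait;
// my_shared_pool is hypothetical: it refers to a reference-counted internal
// pool, is copyable, and provides an operator== comparing the shared pool:
//
//   namespace foonathan { namespace memory
//   {
//       template <>
//       struct is_shared_allocator<my_shared_pool> : std::true_type
//       {
//       };
//   }} // namespace foonathan::memory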
534
535 /// A \concept{concept_storagepolicy,StoragePolicy} that stores a reference to an allocator.
536 /// For stateful allocators it only stores a pointer to an allocator object and copying/moving only copies the pointer.
537 /// For stateless allocators it does not store anything, an allocator will be constructed as needed.
538 /// For allocators that are already shared (determined through \ref is_shared_allocator) it will store the allocator type directly.
539 /// \note It does not take ownership of the allocator in the stateful case; the user has to ensure that the allocator object stays valid.
540 /// In the other cases the lifetime does not matter.
541 /// \ingroup storage
542 template <class RawAllocator>
543 class reference_storage
544 #ifndef DOXYGEN
545 : FOONATHAN_EBO(detail::reference_storage_impl<
546 typename allocator_traits<RawAllocator>::allocator_type,
547 decltype(detail::reference_type(
548 typename allocator_traits<RawAllocator>::is_stateful{},
549 is_shared_allocator<RawAllocator>{}))>)
550 #endif
551 {
552 using storage = detail::reference_storage_impl<
553 typename allocator_traits<RawAllocator>::allocator_type,
554 decltype(
555 detail::reference_type(typename allocator_traits<RawAllocator>::is_stateful{},
556 is_shared_allocator<RawAllocator>{}))>;
557
558 public:
559 using allocator_type = typename allocator_traits<RawAllocator>::allocator_type;
560
561 /// Default constructor.
562 /// \effects If the allocator is stateless, this has no effect and the object is usable as an allocator.
563 /// If the allocator is stateful, creates an invalid reference without any associated allocator.
564 /// Then it must not be used.
565 /// If the allocator is shared, default constructs the shared allocator.
566 /// If the shared allocator does not have a default constructor, this constructor is ill-formed.
567 reference_storage() noexcept = default;
568
569 /// \effects Creates it from a stateless or shared allocator.
570 /// It will not store anything, only creates the allocator as needed.
571 /// \requires The \c RawAllocator is stateless or shared.
572 reference_storage(const allocator_type& alloc) noexcept : storage(alloc) {}
573
574 /// \effects Creates it from a reference to a stateful allocator.
575 /// It will store a pointer to this allocator object.
576 /// \note The user has to take care that the lifetime of the reference does not exceed the allocator lifetime.
577 reference_storage(allocator_type& alloc) noexcept : storage(alloc) {}
578
579 /// @{
580 /// \effects Copies the \c allocator_reference object.
581 /// Only copies the pointer to it in the stateful case.
582 reference_storage(const reference_storage&) noexcept = default;
583 reference_storage& operator=(const reference_storage&) noexcept = default;
584 /// @}
585
586 /// \returns Whether or not the reference is valid.
587 /// It is only invalid if it was created by the default constructor and the allocator is stateful.
588 explicit operator bool() const noexcept
589 {
590 return storage::is_valid();
591 }
592
593 /// \returns Returns a reference to the allocator.
594 /// \requires The reference must be valid.
595 allocator_type& get_allocator() const noexcept
596 {
597 return storage::get_allocator();
598 }
599
600 protected:
601 ~reference_storage() noexcept = default;
602
603 bool is_composable() const noexcept
604 {
605 return is_composable_allocator<allocator_type>::value;
606 }
607 };
608
609 /// Specialization of the class template \ref reference_storage that is type-erased.
610 /// It is triggered by the tag type \ref any_allocator.
611 /// The specialization can store a reference to any allocator type.
612 /// \ingroup storage
613 template <>
614 class reference_storage<any_allocator>
615 {
616 class base_allocator
617 {
618 public:
619 using is_stateful = std::true_type;
620
621 virtual ~base_allocator() = default;
622
623 virtual void clone(void* storage) const noexcept = 0;
624
625 void* allocate_node(std::size_t size, std::size_t alignment)
626 {
627 return allocate_impl(1, size, alignment);
628 }
629
630 void* allocate_array(std::size_t count, std::size_t size, std::size_t alignment)
631 {
632 return allocate_impl(count, size, alignment);
633 }
634
635 void deallocate_node(void* node, std::size_t size, std::size_t alignment) noexcept
636 {
637 deallocate_impl(node, 1, size, alignment);
638 }
639
640 void deallocate_array(void* array, std::size_t count, std::size_t size,
641 std::size_t alignment) noexcept
642 {
643 deallocate_impl(array, count, size, alignment);
644 }
645
646 void* try_allocate_node(std::size_t size, std::size_t alignment) noexcept
647 {
648 return try_allocate_impl(1, size, alignment);
649 }
650
651 void* try_allocate_array(std::size_t count, std::size_t size,
652 std::size_t alignment) noexcept
653 {
654 return try_allocate_impl(count, size, alignment);
655 }
656
657 bool try_deallocate_node(void* node, std::size_t size,
658 std::size_t alignment) noexcept
659 {
660 return try_deallocate_impl(node, 1, size, alignment);
661 }
662
663 bool try_deallocate_array(void* array, std::size_t count, std::size_t size,
664 std::size_t alignment) noexcept
665 {
666 return try_deallocate_impl(array, count, size, alignment);
667 }
668
669 // count 1 means node
670 virtual void* allocate_impl(std::size_t count, std::size_t size,
671 std::size_t alignment) = 0;
672 virtual void deallocate_impl(void* ptr, std::size_t count, std::size_t size,
673 std::size_t alignment) noexcept = 0;
674
675 virtual void* try_allocate_impl(std::size_t count, std::size_t size,
676 std::size_t alignment) noexcept = 0;
677
678 virtual bool try_deallocate_impl(void* ptr, std::size_t count, std::size_t size,
679 std::size_t alignment) noexcept = 0;
680
681 std::size_t max_node_size() const
682 {
683 return max(query::node_size);
684 }
685
686 std::size_t max_array_size() const
687 {
688 return max(query::array_size);
689 }
690
691 std::size_t max_alignment() const
692 {
693 return max(query::alignment);
694 }
695
696 virtual bool is_composable() const noexcept = 0;
697
698 protected:
699 enum class query
700 {
701 node_size,
702 array_size,
703 alignment
704 };
705
706 virtual std::size_t max(query q) const = 0;
707 };
708
709 public:
710 using allocator_type = FOONATHAN_IMPL_DEFINED(base_allocator);
711
712 /// \effects Creates it from a reference to any stateful \concept{concept_rawallocator,RawAllocator}.
713 /// It will store a pointer to this allocator object.
714 /// \note The user has to take care that the lifetime of the reference does not exceed the allocator lifetime.
715 template <class RawAllocator>
716 reference_storage(RawAllocator& alloc) noexcept
717 {
718 static_assert(sizeof(basic_allocator<RawAllocator>)
719 <= sizeof(basic_allocator<default_instantiation>),
720 "requires all instantiations to have certain maximum size");
721 ::new (static_cast<void*>(&storage_)) basic_allocator<RawAllocator>(alloc);
722 }
723
724 /// \effects Creates it from any stateless \concept{concept_rawallocator,RawAllocator}.
725 /// It will not store anything, only creates the allocator as needed.
726 /// \requires The \c RawAllocator is stateless.
727 template <class RawAllocator>
728 reference_storage(
729 const RawAllocator& alloc,
730 FOONATHAN_REQUIRES(!allocator_traits<RawAllocator>::is_stateful::value)) noexcept
731 {
732 static_assert(sizeof(basic_allocator<RawAllocator>)
733 <= sizeof(basic_allocator<default_instantiation>),
734 "requires all instantiations to have certain maximum size");
735 ::new (static_cast<void*>(&storage_)) basic_allocator<RawAllocator>(alloc);
736 }
737
738 /// \effects Creates it from the internal base class for the type-erasure.
739 /// Has the same effect as if the actual stored allocator were passed to the other constructor overloads.
740 /// \note This constructor is used internally to avoid double-nesting.
741 reference_storage(const FOONATHAN_IMPL_DEFINED(base_allocator) & alloc) noexcept
742 {
743 alloc.clone(&storage_);
744 }
745
746 /// @{
747 /// \effects Copies the \c reference_storage object.
748 /// It only copies the pointer to the allocator.
749 reference_storage(const reference_storage& other) noexcept
750 {
751 other.get_allocator().clone(&storage_);
752 }
753
754 reference_storage& operator=(const reference_storage& other) noexcept
755 {
756 get_allocator().~allocator_type();
757 other.get_allocator().clone(&storage_);
758 return *this;
759 }
760 /// @}
761
762 /// \returns A reference to the allocator.
763 /// The actual type is implementation-defined since it is the base class used in the type-erasure,
764 /// but it provides the full \concept{concept_rawallocator,RawAllocator} member functions.
765 /// \note There is no way to access any custom member functions of the allocator type.
766 allocator_type& get_allocator() const noexcept
767 {
768 auto mem = static_cast<void*>(&storage_);
769 return *static_cast<base_allocator*>(mem);
770 }
771
772 protected:
773 ~reference_storage() noexcept
774 {
775 get_allocator().~allocator_type();
776 }
777
778 bool is_composable() const noexcept
779 {
780 return get_allocator().is_composable();
781 }
782
783 private:
784 template <class RawAllocator>
785 class basic_allocator
786 : public base_allocator,
787 private detail::reference_storage_impl<
788 typename allocator_traits<RawAllocator>::allocator_type,
789 decltype(
790 detail::reference_type(typename allocator_traits<RawAllocator>::is_stateful{},
791 is_shared_allocator<RawAllocator>{}))>
792 {
793 using traits = allocator_traits<RawAllocator>;
794 using composable = is_composable_allocator<typename traits::allocator_type>;
795 using storage = detail::reference_storage_impl<
796 typename allocator_traits<RawAllocator>::allocator_type,
797 decltype(detail::reference_type(typename allocator_traits<
798 RawAllocator>::is_stateful{},
799 is_shared_allocator<RawAllocator>{}))>;
800
801 public:
802 // non stateful
803 basic_allocator(const RawAllocator& alloc) noexcept : storage(alloc) {}
804
805 // stateful
806 basic_allocator(RawAllocator& alloc) noexcept : storage(alloc) {}
807
808 private:
809 typename traits::allocator_type& get() const noexcept
810 {
811 return storage::get_allocator();
812 }
813
814 void clone(void* storage) const noexcept override
815 {
816 ::new (storage) basic_allocator(get());
817 }
818
819 void* allocate_impl(std::size_t count, std::size_t size,
820 std::size_t alignment) override
821 {
822 auto&& alloc = get();
823 if (count == 1u)
824 return traits::allocate_node(alloc, size, alignment);
825 else
826 return traits::allocate_array(alloc, count, size, alignment);
827 }
828
829 void deallocate_impl(void* ptr, std::size_t count, std::size_t size,
830 std::size_t alignment) noexcept override
831 {
832 auto&& alloc = get();
833 if (count == 1u)
834 traits::deallocate_node(alloc, ptr, size, alignment);
835 else
836 traits::deallocate_array(alloc, ptr, count, size, alignment);
837 }
838
839 void* try_allocate_impl(std::size_t count, std::size_t size,
840 std::size_t alignment) noexcept override
841 {
842 auto&& alloc = get();
843 if (count == 1u)
844 return detail::try_allocate_node(composable{}, alloc, size, alignment);
845 else
846 return detail::try_allocate_array(composable{}, alloc, count, size,
847 alignment);
848 }
849
850 bool try_deallocate_impl(void* ptr, std::size_t count, std::size_t size,
851 std::size_t alignment) noexcept override
852 {
853 auto&& alloc = get();
854 if (count == 1u)
855 return detail::try_deallocate_node(composable{}, alloc, ptr, size,
856 alignment);
857 else
858 return detail::try_deallocate_array(composable{}, alloc, ptr, count, size,
859 alignment);
860 }
861
862 bool is_composable() const noexcept override
863 {
864 return composable::value;
865 }
866
867 std::size_t max(query q) const override
868 {
869 auto&& alloc = get();
870 if (q == query::node_size)
871 return traits::max_node_size(alloc);
872 else if (q == query::array_size)
873 return traits::max_array_size(alloc);
874 return traits::max_alignment(alloc);
875 }
876 };
877
878 // use a stateful instantiation to determine size and alignment
879 // base_allocator is stateful
880 using default_instantiation = basic_allocator<base_allocator>;
881 alignas(default_instantiation) mutable char storage_[sizeof(default_instantiation)];
882 };
883
884 /// An alias template for \ref allocator_storage using the \ref reference_storage policy.
885 /// It will store a reference to the given allocator type. The tag type \ref any_allocator enables type-erasure.
886 /// Wrap the allocator in a \ref thread_safe_allocator if you want thread safety.
887 /// \ingroup storage
888 template <class RawAllocator>
889 FOONATHAN_ALIAS_TEMPLATE(allocator_reference,
890 allocator_storage<reference_storage<RawAllocator>, no_mutex>);
891
892 /// \returns A new \ref allocator_reference object by forwarding the allocator to the constructor.
893 /// \relates allocator_reference
894 template <class RawAllocator>
895 auto make_allocator_reference(RawAllocator&& allocator) noexcept
896 -> allocator_reference<typename std::decay<RawAllocator>::type>
897 {
898 return {detail::forward<RawAllocator>(allocator)};
899 }
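// e.g., a sketch referencing a stateful allocator
// (memory_pool.hpp assumed included for memory::memory_pool):
//
//   memory::memory_pool<> pool(16u, 4096u); // node size 16, block size 4KiB
//   auto ref = memory::make_allocator_reference(pool); // stores a pointer to pool
//   void* node = ref.allocate_node(sizeof(int), alignof(int));
//   ref.deallocate_node(node, sizeof(int), alignof(int));
//   // note: pool must outlive ref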
900
901 /// An alias for the \ref reference_storage specialization using type-erasure.
902 /// \ingroup storage
903 using any_reference_storage = reference_storage<any_allocator>;
904
905 /// An alias for \ref allocator_storage using the \ref any_reference_storage.
906 /// It will store a reference to any \concept{concept_rawallocator,RawAllocator}.
907 /// This is the same as passing the tag type \ref any_allocator to the alias \ref allocator_reference.
908 /// Wrap the allocator in a \ref thread_safe_allocator if you want thread safety.
909 /// \ingroup storage
910 using any_allocator_reference = allocator_storage<any_reference_storage, no_mutex>;
911
912 /// \returns A new \ref any_allocator_reference object by forwarding the allocator to the constructor.
913 /// \relates any_allocator_reference
914 template <class RawAllocator>
915 auto make_any_allocator_reference(RawAllocator&& allocator) noexcept
916 -> any_allocator_reference
917 {
918 return {detail::forward<RawAllocator>(allocator)};
919 }
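        // Usage sketch (illustrative only, not part of the original header): the
        // type-erased reference has a single type regardless of the allocator it refers
        // to. Assumes memory_pool and heap_allocator from their respective headers:
        //
        //   memory_pool<> pool(16u, 4096u);
        //   heap_allocator heap;
        //   any_allocator_reference any_pool = make_any_allocator_reference(pool);
        //   any_allocator_reference any_heap = make_any_allocator_reference(heap);
        //   // both have the same type despite referring to different allocator types
        //   void* node = any_pool.allocate_node(16u, 8u);
        //   any_pool.deallocate_node(node, 16u, 8u);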
920 } // namespace memory
921 } // namespace foonathan
922
923 #endif // FOONATHAN_MEMORY_ALLOCATOR_STORAGE_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_ALLOCATOR_TRAITS_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_ALLOCATOR_TRAITS_HPP_INCLUDED
6
7 /// \file
8 /// The default specialization of the \ref foonathan::memory::allocator_traits.
9
10 #include <cstddef>
11 #include <type_traits>
12
13 #include "detail/align.hpp"
14 #include "detail/utility.hpp"
15 #include "config.hpp"
16
17 #if FOONATHAN_HOSTED_IMPLEMENTATION
18 #include <memory>
19 #endif
20
21 namespace foonathan
22 {
23 namespace memory
24 {
25 namespace detail
26 {
27 template <class Allocator>
28 std::true_type has_construct(int, FOONATHAN_SFINAE(std::declval<Allocator>().construct(
29 std::declval<typename Allocator::pointer>(),
30 std::declval<typename Allocator::value_type>())));
31
32 template <class Allocator>
33 std::false_type has_construct(short);
34
35 template <class Allocator>
36 std::true_type has_destroy(int, FOONATHAN_SFINAE(std::declval<Allocator>().destroy(
37 std::declval<typename Allocator::pointer>())));
38
39 template <class Allocator>
40 std::false_type has_destroy(short);
41
42 template <class Allocator>
43 struct check_standard_allocator
44 {
45 using custom_construct = decltype(has_construct<Allocator>(0));
46 using custom_destroy = decltype(has_destroy<Allocator>(0));
47
48 using valid = std::integral_constant<bool, !custom_construct::value
49 && !custom_destroy::value>;
50 };
51 } // namespace detail
52
53 /// Traits class that checks whether or not a standard \c Allocator can be used as \concept{concept_rawallocator,RawAllocator}.
54 /// It checks for the existence of custom \c construct() and \c destroy() functions; if they are provided,
55 /// the allocator cannot be used, since these functions would not be called.<br>
56 /// Specialize it for custom \c Allocator types to override this check.
57 /// \ingroup core
58 template <class Allocator>
59 struct allocator_is_raw_allocator
60 : FOONATHAN_EBO(detail::check_standard_allocator<Allocator>::valid)
61 {
62 };
63
64 /// Specialization of \ref allocator_is_raw_allocator that allows \c std::allocator again.
65 /// \ingroup core
66 template <typename T>
67 struct allocator_is_raw_allocator<std::allocator<T>> : std::true_type
68 {
69 };
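        // Illustrative sketch (hypothetical type): a standard allocator with custom
        // construct()/destroy() is rejected by the primary template above, because a
        // RawAllocator would never call those functions:
        //
        //   struct logging_allocator : std::allocator<int>
        //   {
        //       using pointer    = int*;
        //       using value_type = int;
        //       void construct(pointer p, value_type v); // would be silently skipped
        //       void destroy(pointer p);                 // likewise
        //   };
        //   static_assert(!allocator_is_raw_allocator<logging_allocator>::value, "");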
70
71 namespace traits_detail // use separate namespace to avoid name clashes
72 {
73 // full_concept has the best conversion rank, error the lowest
74 // used to give priority to the functions
75 struct error
76 {
77 operator void*() const noexcept
78 {
79 FOONATHAN_MEMORY_UNREACHABLE(
80 "this is just to hide an error and move static_assert to the front");
81 return nullptr;
82 }
83 };
84 struct std_concept : error
85 {
86 };
87 struct min_concept : std_concept
88 {
89 };
90 struct full_concept : min_concept
91 {
92 };
93
94 // used to delay the static_assert in the error overloads until instantiation
95 template <typename T>
96 struct invalid_allocator_concept
97 {
98 static const bool error = false;
99 };
100
101 //=== allocator_type ===//
102 // if Allocator has a member template `rebind`, use that to rebind to `char`
103 // else if Allocator has a member `value_type`, rebind by changing argument
104 // else does nothing
105 template <class Allocator>
106 auto rebind_impl(int) -> typename Allocator::template rebind<char>::other&;
107
108 template <class Allocator, typename T>
109 struct allocator_rebinder
110 {
111 using type = Allocator&;
112 };
113
114 template <template <typename, typename...> class Alloc, typename U, typename... Args,
115 typename T>
116 struct allocator_rebinder<Alloc<U, Args...>, T>
117 {
118 using type = Alloc<T, Args...>&;
119 };
120
121 template <class Allocator, typename = typename Allocator::value_type>
122 auto rebind_impl(char) -> typename allocator_rebinder<Allocator, char>::type;
123
124 template <class Allocator>
125 auto rebind_impl(...) -> Allocator&;
126
127 template <class Allocator>
128 struct allocator_type_impl // required for MSVC
129 {
130 using type = decltype(rebind_impl<Allocator>(0));
131 };
132
133 template <class Allocator>
134 using allocator_type =
135 typename std::decay<typename allocator_type_impl<Allocator>::type>::type;
136
137 //=== is_stateful ===//
138 // first try to access Allocator::is_stateful,
139 // then use whether or not the type is empty
140 template <class Allocator>
141 auto is_stateful(full_concept) -> decltype(typename Allocator::is_stateful{});
142
143 template <class Allocator, bool IsEmpty>
144 struct is_stateful_impl;
145
146 template <class Allocator>
147 struct is_stateful_impl<Allocator, true>
148 {
149 static_assert(std::is_default_constructible<Allocator>::value,
150 "RawAllocator is empty but not default constructible ."
151 "This means it is not a stateless allocator. "
152 "If this is actually intended provide the appropriate is_stateful "
153 "typedef in your class.");
154 using type = std::false_type;
155 };
156
157 template <class Allocator>
158 struct is_stateful_impl<Allocator, false>
159 {
160 using type = std::true_type;
161 };
162
163 template <class Allocator>
164 auto is_stateful(min_concept) ->
165 typename is_stateful_impl<Allocator, std::is_empty<Allocator>::value>::type;
166
167 //=== allocate_node() ===//
168 // first try Allocator::allocate_node
169 // then assume std_allocator and call Allocator::allocate
170 // then error
171 template <class Allocator>
172 auto allocate_node(full_concept, Allocator& alloc, std::size_t size,
173 std::size_t alignment)
174 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.allocate_node(size, alignment), void*)
175
176 template <class Allocator>
177 auto allocate_node(std_concept, Allocator& alloc, std::size_t size, std::size_t)
178 -> FOONATHAN_AUTO_RETURN(static_cast<void*>(alloc.allocate(size)))
179
180 template <class Allocator>
181 error allocate_node(error, Allocator&, std::size_t, std::size_t)
182 {
183 static_assert(invalid_allocator_concept<Allocator>::error,
184 "type is not a RawAllocator as it does not provide: void* "
185 "allocate_node(std::size_t, "
186 "std::size_t)");
187 return {};
188 }
189
190 //=== deallocate_node() ===//
191 // first try Allocator::deallocate_node
192 // then assume std_allocator and call Allocator::deallocate
193 // then error
194 template <class Allocator>
195 auto deallocate_node(full_concept, Allocator& alloc, void* ptr, std::size_t size,
196 std::size_t alignment) noexcept
197 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.deallocate_node(ptr, size, alignment), void)
198
199 template <class Allocator>
200 auto deallocate_node(std_concept, Allocator& alloc, void* ptr, std::size_t size,
201 std::size_t) noexcept
202 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.deallocate(static_cast<char*>(ptr), size), void)
203
204 template <class Allocator>
205 error deallocate_node(error, Allocator&, void*, std::size_t, std::size_t)
206 {
207 static_assert(invalid_allocator_concept<Allocator>::error,
208 "type is not a RawAllocator as it does not provide: void "
209 "deallocate_node(void*, std::size_t, "
210 "std::size_t)");
211 return error{};
212 }
213
214 //=== allocate_array() ===//
215 // first try Allocator::allocate_array
216 // then forward to allocate_node()
217 template <class Allocator>
218 auto allocate_array(full_concept, Allocator& alloc, std::size_t count, std::size_t size,
219 std::size_t alignment)
220 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.allocate_array(count, size, alignment), void*)
221
222 template <class Allocator>
223 void* allocate_array(min_concept, Allocator& alloc, std::size_t count,
224 std::size_t size, std::size_t alignment)
225 {
226 return allocate_node(full_concept{}, alloc, count * size, alignment);
227 }
228
229 //=== deallocate_array() ===//
230 // first try Allocator::deallocate_array
231 // then forward to deallocate_node()
232 template <class Allocator>
233 auto deallocate_array(full_concept, Allocator& alloc, void* ptr, std::size_t count,
234 std::size_t size, std::size_t alignment) noexcept
235 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.deallocate_array(ptr, count, size, alignment),
236 void)
237
238 template <class Allocator>
239 void deallocate_array(min_concept, Allocator& alloc, void* ptr,
240 std::size_t count, std::size_t size,
241 std::size_t alignment) noexcept
242 {
243 deallocate_node(full_concept{}, alloc, ptr, count * size, alignment);
244 }
245
246 //=== max_node_size() ===//
247 // first try Allocator::max_node_size()
248 // then return maximum value
249 template <class Allocator>
250 auto max_node_size(full_concept, const Allocator& alloc)
251 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.max_node_size(), std::size_t)
252
253 template <class Allocator>
254 std::size_t max_node_size(min_concept, const Allocator&) noexcept
255 {
256 return std::size_t(-1);
257 }
258
259 //=== max_array_size() ===//
260 // first try Allocator::max_array_size()
261 // then forward to max_node_size()
262 template <class Allocator>
263 auto max_array_size(full_concept, const Allocator& alloc)
264 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.max_array_size(), std::size_t)
265
266 template <class Allocator>
267 std::size_t max_array_size(min_concept, const Allocator& alloc)
268 {
269 return max_node_size(full_concept{}, alloc);
270 }
271
272 //=== max_alignment() ===//
273 // first try Allocator::max_alignment()
274 // then return detail::max_alignment
275 template <class Allocator>
276 auto max_alignment(full_concept, const Allocator& alloc)
277 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.max_alignment(), std::size_t)
278
279 template <class Allocator>
280 std::size_t max_alignment(min_concept, const Allocator&)
281 {
282 return detail::max_alignment;
283 }
284 } // namespace traits_detail
285
286 /// The default specialization of the allocator_traits for a \concept{concept_rawallocator,RawAllocator}.
287 /// See the last link for the requirements on types that do not specialize this class, as well as for the interface documentation.
288 /// Any specialization must provide the same interface.
289 /// \ingroup core
290 template <class Allocator>
291 class allocator_traits
292 {
293 public:
294 using allocator_type = traits_detail::allocator_type<Allocator>;
295 using is_stateful =
296 decltype(traits_detail::is_stateful<Allocator>(traits_detail::full_concept{}));
297
298 static void* allocate_node(allocator_type& state, std::size_t size,
299 std::size_t alignment)
300 {
301 static_assert(allocator_is_raw_allocator<Allocator>::value,
302 "Allocator cannot be used as RawAllocator because it provides custom "
303 "construct()/destroy()");
304 return traits_detail::allocate_node(traits_detail::full_concept{}, state, size,
305 alignment);
306 }
307
308 static void* allocate_array(allocator_type& state, std::size_t count, std::size_t size,
309 std::size_t alignment)
310 {
311 static_assert(allocator_is_raw_allocator<Allocator>::value,
312 "Allocator cannot be used as RawAllocator because it provides custom "
313 "construct()/destroy()");
314 return traits_detail::allocate_array(traits_detail::full_concept{}, state, count,
315 size, alignment);
316 }
317
318 static void deallocate_node(allocator_type& state, void* node, std::size_t size,
319 std::size_t alignment) noexcept
320 {
321 static_assert(allocator_is_raw_allocator<Allocator>::value,
322 "Allocator cannot be used as RawAllocator because it provides custom "
323 "construct()/destroy()");
324 traits_detail::deallocate_node(traits_detail::full_concept{}, state, node, size,
325 alignment);
326 }
327
328 static void deallocate_array(allocator_type& state, void* array, std::size_t count,
329 std::size_t size, std::size_t alignment) noexcept
330 {
331 static_assert(allocator_is_raw_allocator<Allocator>::value,
332 "Allocator cannot be used as RawAllocator because it provides custom "
333 "construct()/destroy()");
334 traits_detail::deallocate_array(traits_detail::full_concept{}, state, array, count,
335 size, alignment);
336 }
337
338 static std::size_t max_node_size(const allocator_type& state)
339 {
340 static_assert(allocator_is_raw_allocator<Allocator>::value,
341 "Allocator cannot be used as RawAllocator because it provides custom "
342 "construct()/destroy()");
343 return traits_detail::max_node_size(traits_detail::full_concept{}, state);
344 }
345
346 static std::size_t max_array_size(const allocator_type& state)
347 {
348 static_assert(allocator_is_raw_allocator<Allocator>::value,
349 "Allocator cannot be used as RawAllocator because it provides custom "
350 "construct()/destroy()");
351 return traits_detail::max_array_size(traits_detail::full_concept{}, state);
352 }
353
354 static std::size_t max_alignment(const allocator_type& state)
355 {
356 static_assert(allocator_is_raw_allocator<Allocator>::value,
357 "Allocator cannot be used as RawAllocator because it provides custom "
358 "construct()/destroy()");
359 return traits_detail::max_alignment(traits_detail::full_concept{}, state);
360 }
361
362 #if !defined(DOXYGEN)
363 using foonathan_memory_default_traits = std::true_type;
364 #endif
365 };
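        // Minimal RawAllocator accepted by this default specialization (illustrative
        // sketch, hypothetical type): only the two node functions are required; the
        // rest is derived by the traits_detail overloads above.
        //
        //   struct my_allocator
        //   {
        //       void* allocate_node(std::size_t size, std::size_t alignment); // may throw
        //       void deallocate_node(void* node, std::size_t size, std::size_t alignment) noexcept;
        //   };
        //
        //   // allocator_traits<my_allocator>::allocate_array() then forwards to
        //   // allocate_node(count * size, alignment), is_stateful is derived from
        //   // std::is_empty, and the max_* queries fall back to their defaults.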
366
367 namespace detail
368 {
369 template <class RawAllocator>
370 typename allocator_traits<RawAllocator>::foonathan_memory_default_traits
371 alloc_uses_default_traits(RawAllocator&);
372
373 std::false_type alloc_uses_default_traits(...);
374
375 template <typename T>
376 struct has_invalid_alloc_function
377 : std::is_same<decltype(
378 traits_detail::allocate_node(traits_detail::full_concept{},
379 std::declval<typename allocator_traits<
380 T>::allocator_type&>(),
381 0, 0)),
382 traits_detail::error>
383 {
384 };
385
386 template <typename T>
387 struct has_invalid_dealloc_function
388 : std::is_same<
389 decltype(traits_detail::deallocate_node(traits_detail::full_concept{},
390 std::declval<typename allocator_traits<
391 T>::allocator_type&>(),
392 nullptr, 0, 0)),
393 traits_detail::error>
394 {
395 };
396
397 template <typename T, class DefaultTraits>
398 struct is_raw_allocator : std::true_type
399 {
400 };
401
402 template <typename T>
403 struct is_raw_allocator<T, std::integral_constant<bool, true>>
404 : std::integral_constant<bool, allocator_is_raw_allocator<T>::value
405 && !(has_invalid_alloc_function<T>::value
406 || has_invalid_dealloc_function<T>::value)>
407 {
408 };
409 } // namespace detail
410
411 /// Traits that check whether a type models concept \concept{concept_rawallocator,RawAllocator}.<br>
412 /// It must either provide the necessary functions for the default traits specialization or have specialized it.
413 /// \ingroup core
414 template <typename T>
415 struct is_raw_allocator
416 : detail::is_raw_allocator<T,
417 decltype(detail::alloc_uses_default_traits(std::declval<T&>()))>
418 {
419 };
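        // Illustrative checks (sketch): std::allocator qualifies via its specialization
        // of \ref allocator_is_raw_allocator, while an unrelated type is rejected
        // because it lacks the allocation functions:
        //
        //   static_assert(is_raw_allocator<std::allocator<int>>::value, "");
        //   static_assert(!is_raw_allocator<int>::value, "");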
420
421 namespace traits_detail
422 {
423 //=== try_allocate_node() ===//
424 // try Allocator::try_allocate_node
425 // otherwise error
426 template <class Allocator>
427 auto try_allocate_node(full_concept, Allocator& alloc, std::size_t size,
428 std::size_t alignment) noexcept
429 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.try_allocate_node(size, alignment), void*)
430
431 template <class Allocator>
432 error try_allocate_node(error, Allocator&, std::size_t, std::size_t)
433 {
434 static_assert(invalid_allocator_concept<Allocator>::error,
435 "type is not a composable RawAllocator as it does not provide: void* "
436 "try_allocate_node(std::size_t, "
437 "std::size_t)");
438 return {};
439 }
440
441 //=== try_deallocate_node() ===//
442 // try Allocator::try_deallocate_node
443 // otherwise error
444 template <class Allocator>
445 auto try_deallocate_node(full_concept, Allocator& alloc, void* ptr, std::size_t size,
446 std::size_t alignment) noexcept
447 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.try_deallocate_node(ptr, size, alignment), bool)
448
449 template <class Allocator>
450 error try_deallocate_node(error, Allocator&, void*, std::size_t, std::size_t)
451 {
452 static_assert(invalid_allocator_concept<Allocator>::error,
453 "type is not a composable RawAllocator as it does not provide: bool "
454 "try_deallocate_node(void*, std::size_t, "
455 "std::size_t)");
456 return error{};
457 }
458
459 //=== try_allocate_array() ===//
460 // first try Allocator::try_allocate_array
461 // then forward to try_allocate_node()
462 template <class Allocator>
463 auto try_allocate_array(full_concept, Allocator& alloc, std::size_t count,
464 std::size_t size, std::size_t alignment) noexcept
465 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.try_allocate_array(count, size, alignment),
466 void*)
467
468 template <class Allocator>
469 void* try_allocate_array(min_concept, Allocator& alloc, std::size_t count,
470 std::size_t size, std::size_t alignment)
471 {
472 return try_allocate_node(full_concept{}, alloc, count * size, alignment);
473 }
474
475 //=== try_deallocate_array() ===//
476 // first try Allocator::try_deallocate_array
477 // then forward to try_deallocate_node()
478 template <class Allocator>
479 auto try_deallocate_array(full_concept, Allocator& alloc, void* ptr, std::size_t count,
480 std::size_t size, std::size_t alignment) noexcept
481 -> FOONATHAN_AUTO_RETURN_TYPE(alloc.try_deallocate_array(ptr, count, size,
482 alignment),
483 bool)
484
485 template <class Allocator>
486 bool try_deallocate_array(min_concept, Allocator& alloc, void* ptr,
487 std::size_t count, std::size_t size,
488 std::size_t alignment) noexcept
489 {
490 return try_deallocate_node(full_concept{}, alloc, ptr, count * size, alignment);
491 }
492 } // namespace traits_detail
493
494 /// The default specialization of the composable_allocator_traits for a \concept{concept_composableallocator,ComposableAllocator}.
495 /// See the last link for the requirements on types that do not specialize this class, as well as for the interface documentation.
496 /// Any specialization must provide the same interface.
497 /// \ingroup core
498 template <class Allocator>
499 class composable_allocator_traits
500 {
501 public:
502 using allocator_type = typename allocator_traits<Allocator>::allocator_type;
503
504 static void* try_allocate_node(allocator_type& state, std::size_t size,
505 std::size_t alignment) noexcept
506 {
507 static_assert(is_raw_allocator<Allocator>::value,
508 "ComposableAllocator must be RawAllocator");
509 return traits_detail::try_allocate_node(traits_detail::full_concept{}, state, size,
510 alignment);
511 }
512
513 static void* try_allocate_array(allocator_type& state, std::size_t count,
514 std::size_t size, std::size_t alignment) noexcept
515 {
516 static_assert(is_raw_allocator<Allocator>::value,
517 "ComposableAllocator must be RawAllocator");
518 return traits_detail::try_allocate_array(traits_detail::full_concept{}, state,
519 count, size, alignment);
520 }
521
522 static bool try_deallocate_node(allocator_type& state, void* node, std::size_t size,
523 std::size_t alignment) noexcept
524 {
525 static_assert(is_raw_allocator<Allocator>::value,
526 "ComposableAllocator must be RawAllocator");
527 return traits_detail::try_deallocate_node(traits_detail::full_concept{}, state,
528 node, size, alignment);
529 }
530
531 static bool try_deallocate_array(allocator_type& state, void* array, std::size_t count,
532 std::size_t size, std::size_t alignment) noexcept
533 {
534 static_assert(is_raw_allocator<Allocator>::value,
535 "ComposableAllocator must be RawAllocator");
536 return traits_detail::try_deallocate_array(traits_detail::full_concept{}, state,
537 array, count, size, alignment);
538 }
539
540 #if !defined(DOXYGEN)
541 using foonathan_memory_default_traits = std::true_type;
542 #endif
543 };
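        // Usage sketch (illustrative): unlike \ref allocator_traits::allocate_node(),
        // the try_* functions report failure through their return value instead of
        // throwing, which is what makes composing allocators possible:
        //
        //   template <class ComposableAllocator>
        //   void* try_allocate_16(ComposableAllocator& alloc) // returns nullptr on failure
        //   {
        //       using traits = composable_allocator_traits<ComposableAllocator>;
        //       return traits::try_allocate_node(alloc, 16u, 8u);
        //   }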
544
545 namespace detail
546 {
547 template <class RawAllocator>
548 typename composable_allocator_traits<RawAllocator>::foonathan_memory_default_traits
549 composable_alloc_uses_default_traits(RawAllocator&);
550
551 std::false_type composable_alloc_uses_default_traits(...);
552
553 template <typename T>
554 struct has_invalid_try_alloc_function
555 : std::is_same<
556 decltype(traits_detail::try_allocate_node(traits_detail::full_concept{},
557 std::declval<typename allocator_traits<
558 T>::allocator_type&>(),
559 0, 0)),
560 traits_detail::error>
561 {
562 };
563
564 template <typename T>
565 struct has_invalid_try_dealloc_function
566 : std::is_same<
567 decltype(
568 traits_detail::try_deallocate_node(traits_detail::full_concept{},
569 std::declval<typename allocator_traits<
570 T>::allocator_type&>(),
571 nullptr, 0, 0)),
572 traits_detail::error>
573 {
574 };
575
576 template <typename T, class DefaultTraits>
577 struct is_composable_allocator : memory::is_raw_allocator<T>
578 {
579 };
580
581 template <typename T>
582 struct is_composable_allocator<T, std::integral_constant<bool, true>>
583 : std::integral_constant<bool, memory::is_raw_allocator<T>::value
584 && !(has_invalid_try_alloc_function<T>::value
585 || has_invalid_try_dealloc_function<T>::value)>
586 {
587 };
588 } // namespace detail
589
590 /// Traits that check whether a type models concept \concept{concept_composableallocator,ComposableAllocator}.<br>
591 /// It must be a \concept{concept_rawallocator,RawAllocator} and either provide the necessary functions for the default traits specialization or have specialized it.
592 /// \ingroup core
593 template <typename T>
594 struct is_composable_allocator
595 : detail::is_composable_allocator<T, decltype(detail::composable_alloc_uses_default_traits(
596 std::declval<T&>()))>
597 {
598 };
599 } // namespace memory
600 } // namespace foonathan
601
602 #endif // FOONATHAN_MEMORY_ALLOCATOR_TRAITS_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 /// \file
5 /// Configuration macros.
6
7 #ifndef FOONATHAN_MEMORY_CONFIG_HPP_INCLUDED
8 #define FOONATHAN_MEMORY_CONFIG_HPP_INCLUDED
9
10 #include <cstddef>
11
12 #if !defined(DOXYGEN)
13 #define FOONATHAN_MEMORY_IMPL_IN_CONFIG_HPP
14 #include "config_impl.hpp"
15 #undef FOONATHAN_MEMORY_IMPL_IN_CONFIG_HPP
16 #endif
17
18 // exception support
19 #ifndef FOONATHAN_HAS_EXCEPTION_SUPPORT
20 #if defined(__GNUC__) && !defined(__EXCEPTIONS)
21 #define FOONATHAN_HAS_EXCEPTION_SUPPORT 0
22 #elif defined(_MSC_VER) && !_HAS_EXCEPTIONS
23 #define FOONATHAN_HAS_EXCEPTION_SUPPORT 0
24 #else
25 #define FOONATHAN_HAS_EXCEPTION_SUPPORT 1
26 #endif
27 #endif
28
29 #if FOONATHAN_HAS_EXCEPTION_SUPPORT
30 #define FOONATHAN_THROW(Ex) throw(Ex)
31 #else
32 #include <cstdlib>
33 #define FOONATHAN_THROW(Ex) ((Ex), std::abort())
34 #endif
35
36 // hosted implementation
37 #ifndef FOONATHAN_HOSTED_IMPLEMENTATION
38 #if !_MSC_VER && !__STDC_HOSTED__
39 #define FOONATHAN_HOSTED_IMPLEMENTATION 0
40 #else
41 #define FOONATHAN_HOSTED_IMPLEMENTATION 1
42 #endif
43 #endif
44
45 // log prefix
46 #define FOONATHAN_MEMORY_LOG_PREFIX "foonathan::memory"
47
48 // version
49 #define FOONATHAN_MEMORY_VERSION \
50 (FOONATHAN_MEMORY_VERSION_MAJOR * 100 + FOONATHAN_MEMORY_VERSION_MINOR)
51
52 // use this macro to mark implementation-defined types
53 // gives it more semantics and is useful with doxygen
54 // add PREDEFINED: FOONATHAN_IMPL_DEFINED():=implementation_defined
55 #ifndef FOONATHAN_IMPL_DEFINED
56 #define FOONATHAN_IMPL_DEFINED(...) __VA_ARGS__
57 #endif
58
59 // use this macro to mark a base class whose only purpose is EBO
60 // gives it more semantics and is useful with doxygen
61 // add PREDEFINED: FOONATHAN_EBO():=
62 #ifndef FOONATHAN_EBO
63 #define FOONATHAN_EBO(...) __VA_ARGS__
64 #endif
65
66 #ifndef FOONATHAN_ALIAS_TEMPLATE
67 // defines a template alias
68 // usage:
69 // template <typename T>
70 // FOONATHAN_ALIAS_TEMPLATE(bar, foo<T, int>);
71 // useful for doxygen
72 #ifdef DOXYGEN
73 #define FOONATHAN_ALIAS_TEMPLATE(Name, ...) \
74 class Name : public __VA_ARGS__ \
75 { \
76 }
77 #else
78 #define FOONATHAN_ALIAS_TEMPLATE(Name, ...) using Name = __VA_ARGS__
79 #endif
80 #endif
81
82 #ifdef DOXYGEN
83 // dummy definitions of config macros for doxygen
84
85 /// The major version number.
86 /// \ingroup core
87 #define FOONATHAN_MEMORY_VERSION_MAJOR 1
88
89 /// The minor version number.
90 /// \ingroup core
91 #define FOONATHAN_MEMORY_VERSION_MINOR 1
92
93 /// The total version number of the form \c Mmm.
94 /// \ingroup core
95 #define FOONATHAN_MEMORY_VERSION \
96 (FOONATHAN_MEMORY_VERSION_MAJOR * 100 + FOONATHAN_MEMORY_VERSION_MINOR)
97
98 /// Whether or not the allocation size will be checked,
99 /// i.e. whether \ref foonathan::memory::bad_allocation_size will be thrown.
100 /// \ingroup core
101 #define FOONATHAN_MEMORY_CHECK_ALLOCATION_SIZE 1
102
103 /// Whether or not internal assertions in the library are enabled.
104 /// \ingroup core
105 #define FOONATHAN_MEMORY_DEBUG_ASSERT 1
106
107 /// Whether or not allocated memory will be filled with special values.
108 /// \ingroup core
109 #define FOONATHAN_MEMORY_DEBUG_FILL 1
110
111 /// The size of the fence memory; it has no effect if \ref FOONATHAN_MEMORY_DEBUG_FILL is \c false.
112 /// \note For most allocators, the actual value doesn't matter and they use appropriate defaults to ensure alignment etc.
113 /// \ingroup core
114 #define FOONATHAN_MEMORY_DEBUG_FENCE 1
115
116 /// Whether or not leak checking is enabled.
117 /// \ingroup core
118 #define FOONATHAN_MEMORY_DEBUG_LEAK_CHECK 1
119
120 /// Whether or not the deallocation functions will check for pointers that were never allocated by an allocator.
121 /// \ingroup core
122 #define FOONATHAN_MEMORY_DEBUG_POINTER_CHECK 1
123
124 /// Whether or not the deallocation functions will check for double free errors.
125 /// This option makes no sense if \ref FOONATHAN_MEMORY_DEBUG_POINTER_CHECK is \c false.
126 /// \ingroup core
127 #define FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK 1
128
129 /// Whether or not everything is in namespace <tt>foonathan::memory</tt>.
130 /// If \c false, a namespace alias <tt>namespace memory = foonathan::memory</tt> is automatically inserted into each header,
131 /// allowing everything to be qualified with just <tt>memory::</tt> instead of <tt>foonathan::memory::</tt>.
132 /// \note This option breaks in combination with <tt>using namespace foonathan;</tt>.
133 /// \ingroup core
134 #define FOONATHAN_MEMORY_NAMESPACE_PREFIX 1
135
136 /// The mode of the automatic \ref foonathan::memory::temporary_stack creation.
137 /// Set to `2` to enable automatic lifetime management of the per-thread stack through nifty counter.
138 /// Then all memory will be freed upon program termination automatically.
139 /// Set to `1` to disable automatic lifetime management of the per-thread stack,
140 /// requires managing it through the \ref foonathan::memory::temporary_stack_initializer.
141 /// Set to `0` to disable the per-thread stack completely.
142 /// \ref get_temporary_stack() will abort the program upon call.
143 /// \ingroup allocator
144 #define FOONATHAN_MEMORY_TEMPORARY_STACK_MODE 2
145 #endif
146
147 #endif // FOONATHAN_MEMORY_CONFIG_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_CONTAINER_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_CONTAINER_HPP_INCLUDED
6
7 /// \file
8 /// Aliases for STL containers using a certain \concept{concept_rawallocator,RawAllocator}.
9 /// \note Only available on a hosted implementation.
10
11 #include "config.hpp"
12 #if !FOONATHAN_HOSTED_IMPLEMENTATION
13 #error "This header is only available for a hosted implementation."
14 #endif
15
16 #include <functional>
17 #include <utility>
18
19 #include <deque>
20 #include <forward_list>
21 #include <list>
22 #include <map>
23 #include <queue>
24 #include <scoped_allocator>
25 #include <set>
26 #include <stack>
27 #include <string>
28 #include <unordered_map>
29 #include <unordered_set>
30 #include <vector>
31
32 #include "std_allocator.hpp"
33 #include "threading.hpp"
34
35 namespace foonathan
36 {
37 namespace memory
38 {
39 /// \ingroup adapter
40 /// @{
41
42 /// Alias template for an STL container that uses a certain
43 /// \concept{concept_rawallocator,RawAllocator}. It is just a shorthand for passing in the
44 /// \c RawAllocator wrapped in a \ref foonathan::memory::std_allocator.
45 template <typename T, class RawAllocator>
46 FOONATHAN_ALIAS_TEMPLATE(vector, std::vector<T, std_allocator<T, RawAllocator>>);
47
48 /// Same as above but uses \c std::scoped_allocator_adaptor so the allocator is inherited by all
49 /// nested containers.
50 template <typename T, class RawAllocator>
51 FOONATHAN_ALIAS_TEMPLATE(
52 vector_scoped_alloc,
53 std::vector<T, std::scoped_allocator_adaptor<std_allocator<T, RawAllocator>>>);
54
55 /// \copydoc vector
56 template <typename T, class RawAllocator>
57 FOONATHAN_ALIAS_TEMPLATE(deque, std::deque<T, std_allocator<T, RawAllocator>>);
58 /// \copydoc vector_scoped_alloc
59 template <typename T, class RawAllocator>
60 FOONATHAN_ALIAS_TEMPLATE(
61 deque_scoped_alloc,
62 std::deque<T, std::scoped_allocator_adaptor<std_allocator<T, RawAllocator>>>);
63
64 /// \copydoc vector
65 template <typename T, class RawAllocator>
66 FOONATHAN_ALIAS_TEMPLATE(list, std::list<T, std_allocator<T, RawAllocator>>);
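        // Usage sketch (illustrative; assumes "memory_pool.hpp" is included alongside
        // this header and that the auto-generated node size constants are available):
        //
        //   memory_pool<> pool(list_node_size<int>::value, 4096u);
        //   list<int, memory_pool<>> lst(pool); // a std::list allocating from the pool
        //   lst.push_back(3);
        //
        // The other aliases follow the same pattern; only the allocator and the node
        // sizes (see the *_node_size classes below) differ.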
67 /// \copydoc vector_scoped_alloc
68 template <typename T, class RawAllocator>
69 FOONATHAN_ALIAS_TEMPLATE(
70 list_scoped_alloc,
71 std::list<T, std::scoped_allocator_adaptor<std_allocator<T, RawAllocator>>>);
72
73 /// \copydoc vector
74 template <typename T, class RawAllocator>
75 FOONATHAN_ALIAS_TEMPLATE(forward_list,
76 std::forward_list<T, std_allocator<T, RawAllocator>>);
77 /// \copydoc vector_scoped_alloc
78 template <typename T, class RawAllocator>
79 FOONATHAN_ALIAS_TEMPLATE(
80 forward_list_scoped_alloc,
81 std::forward_list<T, std::scoped_allocator_adaptor<std_allocator<T, RawAllocator>>>);
82
83 /// \copydoc vector
84 template <typename T, class RawAllocator>
85 FOONATHAN_ALIAS_TEMPLATE(set, std::set<T, std::less<T>, std_allocator<T, RawAllocator>>);
86 /// \copydoc vector_scoped_alloc
87 template <typename T, class RawAllocator>
88 FOONATHAN_ALIAS_TEMPLATE(
89 set_scoped_alloc,
90 std::set<T, std::less<T>,
91 std::scoped_allocator_adaptor<std_allocator<T, RawAllocator>>>);
92
93 /// \copydoc vector
94 template <typename T, class RawAllocator>
95 FOONATHAN_ALIAS_TEMPLATE(multiset,
96 std::multiset<T, std::less<T>, std_allocator<T, RawAllocator>>);
97 /// \copydoc vector_scoped_alloc
98 template <typename T, class RawAllocator>
99 FOONATHAN_ALIAS_TEMPLATE(
100 multiset_scoped_alloc,
101 std::multiset<T, std::less<T>,
102 std::scoped_allocator_adaptor<std_allocator<T, RawAllocator>>>);
103
104 /// \copydoc vector
105 template <typename Key, typename Value, class RawAllocator>
106 FOONATHAN_ALIAS_TEMPLATE(
107 map, std::map<Key, Value, std::less<Key>,
108 std_allocator<std::pair<const Key, Value>, RawAllocator>>);
109 /// \copydoc vector_scoped_alloc
110 template <typename Key, typename Value, class RawAllocator>
111 FOONATHAN_ALIAS_TEMPLATE(
112 map_scoped_alloc,
113 std::map<Key, Value, std::less<Key>,
114 std::scoped_allocator_adaptor<
115 std_allocator<std::pair<const Key, Value>, RawAllocator>>>);
116
117 /// \copydoc vector
118 template <typename Key, typename Value, class RawAllocator>
119 FOONATHAN_ALIAS_TEMPLATE(
120 multimap, std::multimap<Key, Value, std::less<Key>,
121 std_allocator<std::pair<const Key, Value>, RawAllocator>>);
122 /// \copydoc vector_scoped_alloc
123 template <typename Key, typename Value, class RawAllocator>
124 FOONATHAN_ALIAS_TEMPLATE(
125 multimap_scoped_alloc,
126 std::multimap<Key, Value, std::less<Key>,
127 std::scoped_allocator_adaptor<
128 std_allocator<std::pair<const Key, Value>, RawAllocator>>>);
129
130 /// \copydoc vector
131 template <typename T, class RawAllocator>
132 FOONATHAN_ALIAS_TEMPLATE(
133 unordered_set,
134 std::unordered_set<T, std::hash<T>, std::equal_to<T>, std_allocator<T, RawAllocator>>);
135 /// \copydoc vector_scoped_alloc
136 template <typename T, class RawAllocator>
137 FOONATHAN_ALIAS_TEMPLATE(
138 unordered_set_scoped_alloc,
139 std::unordered_set<T, std::hash<T>, std::equal_to<T>,
140 std::scoped_allocator_adaptor<std_allocator<T, RawAllocator>>>);
141
142 /// \copydoc vector
143 template <typename T, class RawAllocator>
144 FOONATHAN_ALIAS_TEMPLATE(unordered_multiset,
145 std::unordered_multiset<T, std::hash<T>, std::equal_to<T>,
146 std_allocator<T, RawAllocator>>);
147 /// \copydoc vector_scoped_alloc
148 template <typename T, class RawAllocator>
149 FOONATHAN_ALIAS_TEMPLATE(
150 unordered_multiset_scoped_alloc,
151 std::unordered_multiset<T, std::hash<T>, std::equal_to<T>,
152 std::scoped_allocator_adaptor<std_allocator<T, RawAllocator>>>);
153
154 /// \copydoc vector
155 template <typename Key, typename Value, class RawAllocator>
156 FOONATHAN_ALIAS_TEMPLATE(
157 unordered_map,
158 std::unordered_map<Key, Value, std::hash<Key>, std::equal_to<Key>,
159 std_allocator<std::pair<const Key, Value>, RawAllocator>>);
160 /// \copydoc vector_scoped_alloc
161 template <typename Key, typename Value, class RawAllocator>
162 FOONATHAN_ALIAS_TEMPLATE(
163 unordered_map_scoped_alloc,
164 std::unordered_map<Key, Value, std::hash<Key>, std::equal_to<Key>,
165 std::scoped_allocator_adaptor<
166 std_allocator<std::pair<const Key, Value>, RawAllocator>>>);
167
168 /// \copydoc vector
169 template <typename Key, typename Value, class RawAllocator>
170 FOONATHAN_ALIAS_TEMPLATE(
171 unordered_multimap,
172 std::unordered_multimap<Key, Value, std::hash<Key>, std::equal_to<Key>,
173 std_allocator<std::pair<const Key, Value>, RawAllocator>>);
174 /// \copydoc vector_scoped_alloc
175 template <typename Key, typename Value, class RawAllocator>
176 FOONATHAN_ALIAS_TEMPLATE(
177 unordered_multimap_scoped_alloc,
178 std::unordered_multimap<Key, Value, std::hash<Key>, std::equal_to<Key>,
179 std::scoped_allocator_adaptor<
180 std_allocator<std::pair<const Key, Value>, RawAllocator>>>);
181
182 /// \copydoc vector
183 template <typename T, class RawAllocator>
184 FOONATHAN_ALIAS_TEMPLATE(stack, std::stack<T, deque<T, RawAllocator>>);
185 /// \copydoc vector_scoped_alloc
186 template <typename T, class RawAllocator>
187 FOONATHAN_ALIAS_TEMPLATE(stack_scoped_alloc,
188 std::stack<T, deque_scoped_alloc<T, RawAllocator>>);
189
190 /// \copydoc vector
191 template <typename T, class RawAllocator>
192 FOONATHAN_ALIAS_TEMPLATE(queue, std::queue<T, deque<T, RawAllocator>>);
193 /// \copydoc vector_scoped_alloc
194 template <typename T, class RawAllocator>
195 FOONATHAN_ALIAS_TEMPLATE(queue_scoped_alloc,
196 std::queue<T, deque_scoped_alloc<T, RawAllocator>>);
197
198 /// \copydoc vector
199 template <typename T, class RawAllocator>
200 FOONATHAN_ALIAS_TEMPLATE(priority_queue, std::priority_queue<T, deque<T, RawAllocator>>);
201 /// \copydoc vector_scoped_alloc
202 template <typename T, class RawAllocator>
203 FOONATHAN_ALIAS_TEMPLATE(priority_queue_scoped_alloc,
204 std::priority_queue<T, deque_scoped_alloc<T, RawAllocator>>);
205
206 /// \copydoc vector
207 template <class RawAllocator>
208 FOONATHAN_ALIAS_TEMPLATE(
209 string,
210 std::basic_string<char, std::char_traits<char>, std_allocator<char, RawAllocator>>);
211 /// @}
212
213 /// @{
214 /// Convenience function to create a container adapter using a certain \concept{concept_rawallocator,RawAllocator}.
215 /// \returns An empty adapter whose implementation container uses a reference to a given allocator.
216 /// \ingroup adapter
217 template <typename T, class RawAllocator, class Container = deque<T, RawAllocator>>
218 std::stack<T, Container> make_stack(RawAllocator& allocator)
219 {
220 return std::stack<T, Container>{Container(allocator)};
221 }
222
223 /// \copydoc make_stack
224 template <typename T, class RawAllocator, class Container = deque<T, RawAllocator>>
225 std::queue<T, Container> make_queue(RawAllocator& allocator)
226 {
227 return std::queue<T, Container>{Container(allocator)};
228 }
229
230 /// \copydoc make_stack
231 template <typename T, class RawAllocator, class Container = deque<T, RawAllocator>,
232 class Compare = std::less<T>>
233 std::priority_queue<T, Container, Compare> make_priority_queue(RawAllocator& allocator,
234 Compare comp = {})
235 {
236 return std::priority_queue<T, Container, Compare>{detail::move(comp),
237 Container(allocator)};
238 }
239 /// @}
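        // Usage sketch (illustrative; assumes heap_allocator from "heap_allocator.hpp"):
        //
        //   heap_allocator heap;
        //   auto st = make_stack<int>(heap); // std::stack over memory::deque
        //   st.push(42);
        //   auto pq = make_priority_queue<int>(heap);
        //   pq.push(7);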
240
241 #if !defined(DOXYGEN)
242
243 #include "detail/container_node_sizes.hpp"
244
245 #if !defined(FOONATHAN_MEMORY_NO_NODE_SIZE)
246 /// \exclude
247 namespace detail
248 {
249 template <typename T, class StdAllocator>
250 struct shared_ptr_node_size
251 {
252 static_assert(sizeof(T) != sizeof(T), "unsupported allocator type");
253 };
254
255 template <typename T, class RawAllocator>
256 struct shared_ptr_node_size<T, std_allocator<T, RawAllocator>>
257 : std::conditional<allocator_traits<RawAllocator>::is_stateful::value,
258 memory::shared_ptr_stateful_node_size<T>,
259 memory::shared_ptr_stateless_node_size<T>>::type
260 {
261 static_assert(sizeof(std_allocator<T, RawAllocator>) <= sizeof(void*),
262 "fix node size debugger");
263 };
264
265 } // namespace detail
266
267 template <typename T, class StdAllocator>
268 struct shared_ptr_node_size : detail::shared_ptr_node_size<T, StdAllocator>
269 {
270 };
271 #endif
272
273 #else
274 /// \ingroup adapter
275 /// @{
276
277 /// Contains the node size of a node-based STL container with a specific type.
278 /// These classes are auto-generated and only available if the tools are built and not
279 /// cross-compiling.
280 template <typename T>
281 struct forward_list_node_size : std::integral_constant<std::size_t, implementation_defined>
282 {
283 };
284
285 /// \copydoc forward_list_node_size
286 template <typename T>
287 struct list_node_size : std::integral_constant<std::size_t, implementation_defined>
288 {
289 };
290
291 /// \copydoc forward_list_node_size
292 template <typename T>
293 struct set_node_size : std::integral_constant<std::size_t, implementation_defined>
294 {
295 };
296
297 /// \copydoc forward_list_node_size
298 template <typename T>
299 struct multiset_node_size : std::integral_constant<std::size_t, implementation_defined>
300 {
301 };
302
303 /// \copydoc forward_list_node_size
304 template <typename T>
305 struct unordered_set_node_size : std::integral_constant<std::size_t, implementation_defined>
306 {
307 };
308
309 /// \copydoc forward_list_node_size
310 template <typename T>
311 struct unordered_multiset_node_size
312 : std::integral_constant<std::size_t, implementation_defined>
313 {
314 };
315
316 /// \copydoc forward_list_node_size
317 template <typename T>
318 struct map_node_size : std::integral_constant<std::size_t, implementation_defined>
319 {
320 };
321
322 /// \copydoc forward_list_node_size
323 template <typename T>
324 struct multimap_node_size : std::integral_constant<std::size_t, implementation_defined>
325 {
326 };
327
328 /// \copydoc forward_list_node_size
329 template <typename T>
330 struct unordered_map_node_size : std::integral_constant<std::size_t, implementation_defined>
331 {
332 };
333
334 /// \copydoc forward_list_node_size
335 template <typename T>
336 struct unordered_multimap_node_size
337 : std::integral_constant<std::size_t, implementation_defined>
338 {
339 };
340
341 /// \copydoc forward_list_node_size
342 template <typename T, class StdAllocator>
343 struct shared_ptr_node_size : std::integral_constant<std::size_t, implementation_defined>
344 {
345 };
346 /// @}
347 #endif
348
349 #if !defined(FOONATHAN_MEMORY_NO_NODE_SIZE)
350 /// The node size required by \ref allocate_shared.
351 /// \note This is similar to \ref shared_ptr_node_size but takes a
352 /// \concept{concept_rawallocator,RawAllocator} instead.
353 template <typename T, class RawAllocator>
354 struct allocate_shared_node_size : shared_ptr_node_size<T, std_allocator<T, RawAllocator>>
355 {
356 };
357 #endif
358 } // namespace memory
359 } // namespace foonathan
360
361 #endif // FOONATHAN_MEMORY_CONTAINER_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DEBUGGING_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DEBUGGING_HPP_INCLUDED
6
7 /// \file
8 /// Debugging facilities.
9
10 #include "config.hpp"
11
12 namespace foonathan
13 {
14 namespace memory
15 {
16 struct allocator_info;
17
18 /// The magic values that are used for debug filling.
19 /// If \ref FOONATHAN_MEMORY_DEBUG_FILL is \c true, memory will be filled to help detect use-after-free or missing initialization errors.
20 /// These are the constants for the different types.
21 /// \ingroup core
22 enum class debug_magic : unsigned char
23 {
24 /// Marks internal memory used by the allocator - "allocated block".
25 internal_memory = 0xAB,
26 /// Marks internal memory currently not used by the allocator - "freed block".
27 internal_freed_memory = 0xFB,
28 /// Marks allocated, but not yet used memory - "clean memory".
29 new_memory = 0xCD,
30 /// Marks freed memory - "dead memory".
31 freed_memory = 0xDD,
32 /// Marks buffer memory used to ensure proper alignment.
33 /// This memory can also serve as \ref debug_magic::fence_memory.
34 alignment_memory = 0xED,
35 /// Marks buffer memory used to protect against overflow - "fence memory".
36 /// The option \ref FOONATHAN_MEMORY_DEBUG_FENCE controls the size of a memory fence that will be placed before or after a memory block.
37 /// It helps catch buffer overflows.
38 fence_memory = 0xFD
39 };
40
41 /// The type of the handler called when a memory leak is detected.
42 /// Leak checking can be controlled via the option \ref FOONATHAN_MEMORY_DEBUG_LEAK_CHECK
43 /// and only affects calls through the \ref allocator_traits, not direct calls.
44 /// The handler gets the \ref allocator_info and the amount of memory leaked.
45 /// This can also be negative, meaning that more memory has been freed than allocated.
46 /// \requiredbe A leak handler shall log the leak, abort the program, do nothing or anything else that seems appropriate.
47 /// It must not throw any exceptions since it is called in the cleanup process.
48 /// \defaultbe On a hosted implementation it logs the leak to \c stderr and returns, continuing execution.
49 /// On a freestanding implementation it does nothing.
50 /// \ingroup core
51 using leak_handler = void (*)(const allocator_info& info, std::ptrdiff_t amount);
52
53 /// Exchanges the \ref leak_handler.
54 /// \effects Sets \c h as the new \ref leak_handler in an atomic operation.
55 /// A \c nullptr sets the default \ref leak_handler.
56 /// \returns The previous \ref leak_handler. This is never \c nullptr.
57 /// \ingroup core
58 leak_handler set_leak_handler(leak_handler h);
59
60 /// Returns the \ref leak_handler.
61 /// \returns The current \ref leak_handler. This is never \c nullptr.
62 /// \ingroup core
63 leak_handler get_leak_handler();
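        // Usage sketch (illustrative, not part of the original header; assumes the
        // allocator_info::name member declared in "error.hpp", plus <cstdio>):
        //
        //   void my_leak_handler(const allocator_info& info, std::ptrdiff_t amount)
        //   {
        //       // must not throw: this may run during cleanup
        //       std::fprintf(stderr, "[%s] leaked %ld bytes\n", info.name,
        //                    static_cast<long>(amount));
        //   }
        //
        //   auto previous = set_leak_handler(&my_leak_handler);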
64
65 /// The type of the handler called when an invalid pointer is passed to a deallocation function.
66 /// Pointer checking can be controlled via the options \ref FOONATHAN_MEMORY_DEBUG_POINTER_CHECK and \ref FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK.
67 /// The handler gets the \ref allocator_info and the invalid pointer.
68 /// \requiredbe An invalid pointer handler shall terminate the program.
69 /// It must not throw any exceptions since it might be called in the cleanup process.
70 /// \defaultbe On a hosted implementation it logs the information to \c stderr and calls \c std::abort().
71 /// On a freestanding implementation it only calls \c std::abort().
72 /// \ingroup core
73 using invalid_pointer_handler = void (*)(const allocator_info& info, const void* ptr);
74
75 /// Exchanges the \ref invalid_pointer_handler.
76 /// \effects Sets \c h as the new \ref invalid_pointer_handler in an atomic operation.
77 /// A \c nullptr sets the default \ref invalid_pointer_handler.
78 /// \returns The previous \ref invalid_pointer_handler. This is never \c nullptr.
79 /// \ingroup core
80 invalid_pointer_handler set_invalid_pointer_handler(invalid_pointer_handler h);
81
82 /// Returns the \ref invalid_pointer_handler.
83 /// \returns The current \ref invalid_pointer_handler. This is never \c nullptr.
84 /// \ingroup core
85 invalid_pointer_handler get_invalid_pointer_handler();
86
87 /// The type of the handler called when a buffer under/overflow is detected.
88 /// If \ref FOONATHAN_MEMORY_DEBUG_FILL is \c true and \ref FOONATHAN_MEMORY_DEBUG_FENCE has a non-zero value,
89 /// the allocator classes check if a write into the fence has occurred upon deallocation.
90 /// The handler gets the memory block belonging to the corrupted fence, its size and the exact address.
91 /// \requiredbe A buffer overflow handler shall terminate the program.
92 /// It must not throw any exceptions since it may be called in the cleanup process.
93 /// \defaultbe On a hosted implementation it logs the information to \c stderr and calls \c std::abort().
94 /// On a freestanding implementation it only calls \c std::abort().
95 /// \ingroup core
96 using buffer_overflow_handler = void (*)(const void* memory, std::size_t size,
97 const void* write_ptr);
98
99 /// Exchanges the \ref buffer_overflow_handler.
100 /// \effects Sets \c h as the new \ref buffer_overflow_handler in an atomic operation.
101 /// A \c nullptr sets the default \ref buffer_overflow_handler.
102 /// \returns The previous \ref buffer_overflow_handler. This is never \c nullptr.
103 /// \ingroup core
104 buffer_overflow_handler set_buffer_overflow_handler(buffer_overflow_handler h);
105
106 /// Returns the \ref buffer_overflow_handler.
107 /// \returns The current \ref buffer_overflow_handler. This is never \c nullptr.
108 /// \ingroup core
109 buffer_overflow_handler get_buffer_overflow_handler();
110 } // namespace memory
111 } // namespace foonathan
112
113 #endif // FOONATHAN_MEMORY_DEBUGGING_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DEFAULT_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DEFAULT_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// The typedef \ref foonathan::memory::default_allocator.
9
10 #include "config.hpp"
11 #include "heap_allocator.hpp"
12 #include "new_allocator.hpp"
13 #include "static_allocator.hpp"
14 #include "virtual_memory.hpp"
15
16 #if FOONATHAN_HOSTED_IMPLEMENTATION
17 #include "malloc_allocator.hpp"
18 #endif
19
20 namespace foonathan
21 {
22 namespace memory
23 {
24 /// The default \concept{concept_rawallocator,RawAllocator} that will be used as \concept{concept_blockallocator,BlockAllocator} in memory arenas.
25 /// Arena allocators like \ref memory_stack or \ref memory_pool allocate memory by subdividing a huge block.
26 /// They get a \concept{concept_blockallocator,BlockAllocator} that will be used for their internal allocation,
27 /// this type is the default value.
28 /// \requiredbe Its type can be changed via the CMake option \c FOONATHAN_MEMORY_DEFAULT_ALLOCATOR,
29 /// but it must be one of the following: \ref heap_allocator, \ref new_allocator, \ref malloc_allocator, \ref static_allocator, \ref virtual_memory_allocator.
30 /// \defaultbe The default is \ref heap_allocator.
31 /// \ingroup allocator
32 using default_allocator = FOONATHAN_IMPL_DEFINED(FOONATHAN_MEMORY_IMPL_DEFAULT_ALLOCATOR);
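        // Usage sketch (illustrative; assumes "memory_pool.hpp" and that memory_pool's
        // BlockAllocator template parameter defaults to default_allocator):
        //
        //   memory_pool<> pool_a(16u, 4096u); // blocks come from default_allocator
        //   memory_pool<node_pool, heap_allocator> pool_b(16u, 4096u); // explicit choice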
33 } // namespace memory
34 } // namespace foonathan
35
36 #endif // FOONATHAN_MEMORY_DEFAULT_ALLOCATOR_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DELETER_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DELETER_HPP_INCLUDED
6
7 /// \file
8 /// \c Deleter classes using a \concept{concept_rawallocator,RawAllocator}.
9
10 #include <type_traits>
11
12 #include "allocator_storage.hpp"
13 #include "config.hpp"
14 #include "threading.hpp"
15
16 namespace foonathan
17 {
18 namespace memory
19 {
20 /// A deleter class that deallocates the memory through a specified \concept{concept_rawallocator,RawAllocator}.
21 ///
22 /// It deallocates memory for a specified type but does not call its destructors.
23 /// \ingroup adapter
24 template <typename Type, class RawAllocator>
25 class allocator_deallocator : FOONATHAN_EBO(allocator_reference<RawAllocator>)
26 {
27 static_assert(!std::is_abstract<Type>::value,
28 "use allocator_polymorphic_deallocator for storing base classes");
29
30 public:
31 using allocator_type = typename allocator_reference<RawAllocator>::allocator_type;
32 using value_type = Type;
33
34 /// \effects Creates it without any associated allocator.
35 /// The deallocator must not be used if that is the case.
36 /// \notes This function is useful if you want to create an empty smart pointer without giving it an allocator.
37 allocator_deallocator() noexcept = default;
38
39 /// \effects Creates it by passing it an \ref allocator_reference.
40 /// It will store the reference and use the referenced allocator object for the deallocation.
41 allocator_deallocator(allocator_reference<RawAllocator> alloc) noexcept
42 : allocator_reference<RawAllocator>(alloc)
43 {
44 }
45
46 /// \effects Deallocates the memory given to it.
47 /// Calls \c deallocate_node(pointer, sizeof(value_type), alignof(value_type)) on the referenced allocator object.
48 /// \requires The deallocator must not have been created by the default constructor.
49 void operator()(value_type* pointer) noexcept
50 {
51 this->deallocate_node(pointer, sizeof(value_type), alignof(value_type));
52 }
53
54 /// \returns The reference to the allocator.
55 /// It has the same type as the call to \ref allocator_reference::get_allocator().
56 /// \requires The deallocator must not have been created by the default constructor.
57 auto get_allocator() const noexcept
58 -> decltype(std::declval<allocator_reference<allocator_type>>().get_allocator())
59 {
60 return this->allocator_reference<allocator_type>::get_allocator();
61 }
62 };
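        // Usage sketch (illustrative, not part of the original header; assumes
        // heap_allocator from "heap_allocator.hpp", plus <memory> and <new>): a
        // std::unique_ptr that returns the memory without running a destructor,
        // which is fine for trivially destructible types:
        //
        //   heap_allocator heap;
        //   auto ref = make_allocator_reference(heap);
        //   auto* raw = static_cast<int*>(ref.allocate_node(sizeof(int), alignof(int)));
        //   std::unique_ptr<int, allocator_deallocator<int, heap_allocator>>
        //       ptr(::new (raw) int(42), {ref});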
63
64 /// Specialization of \ref allocator_deallocator for array types.
65 /// Otherwise the same behavior.
66 /// \ingroup adapter
67 template <typename Type, class RawAllocator>
68 class allocator_deallocator<Type[], RawAllocator>
69 : FOONATHAN_EBO(allocator_reference<RawAllocator>)
70 {
71 static_assert(!std::is_abstract<Type>::value, "must not create polymorphic arrays");
72
73 public:
74 using allocator_type = typename allocator_reference<RawAllocator>::allocator_type;
75 using value_type = Type;
76
77 /// \effects Creates it without any associated allocator.
78 /// The deallocator must not be used if that is the case.
79 /// \notes This function is useful if you want to create an empty smart pointer without giving it an allocator.
80 allocator_deallocator() noexcept : size_(0u) {}
81
82 /// \effects Creates it by passing it an \ref allocator_reference and the size of the array that will be deallocated.
83 /// It will store the reference to the allocator and use the referenced allocator object for the deallocation.
84 allocator_deallocator(allocator_reference<RawAllocator> alloc,
85 std::size_t size) noexcept
86 : allocator_reference<RawAllocator>(alloc), size_(size)
87 {
88 }
89
90 /// \effects Deallocates the memory given to it.
91 /// Calls \c deallocate_array(pointer, size, sizeof(value_type), alignof(value_type))
92 /// on the referenced allocator object with the size given in the constructor.
93 /// \requires The deallocator must not have been created by the default constructor.
94 void operator()(value_type* pointer) noexcept
95 {
96 this->deallocate_array(pointer, size_, sizeof(value_type), alignof(value_type));
97 }
98
99 /// \returns The reference to the allocator.
100 /// It has the same type as the call to \ref allocator_reference::get_allocator().
101 /// \requires The deallocator must not have been created by the default constructor.
102 auto get_allocator() const noexcept
103 -> decltype(std::declval<allocator_reference<allocator_type>>().get_allocator())
104 {
105 return this->allocator_reference<allocator_type>::get_allocator();
106 }
107
108 /// \returns The size of the array that will be deallocated.
109 /// This is the same value as passed in the constructor, or `0` if it was created by the default constructor.
110 std::size_t array_size() const noexcept
111 {
112 return size_;
113 }
114
115 private:
116 std::size_t size_;
117 };
118
119 /// A deleter class that deallocates the memory of a derived type through a specified \concept{concept_rawallocator,RawAllocator}.
120 ///
121 /// It can only be created from an \ref allocator_deallocator and thus must only be used for smart pointers initialized by derived-to-base conversion of the pointer.
122 /// \ingroup adapter
123 template <typename BaseType, class RawAllocator>
124 class allocator_polymorphic_deallocator : FOONATHAN_EBO(allocator_reference<RawAllocator>)
125 {
126 public:
127 using allocator_type = typename allocator_reference<RawAllocator>::allocator_type;
128 using value_type = BaseType;
129
130 /// \effects Creates it from a deallocator for a derived type.
131 /// It will deallocate the memory as if done by the derived type.
132 template <typename T, FOONATHAN_REQUIRES((std::is_base_of<BaseType, T>::value))>
133 allocator_polymorphic_deallocator(allocator_deallocator<T, RawAllocator> dealloc)
134 : allocator_reference<RawAllocator>(dealloc.get_allocator()),
135 derived_size_(sizeof(T)),
136 derived_alignment_(alignof(T))
137 {
138 }
139
140 /// \effects Deallocates the memory given to it.
141 /// Calls \c deallocate_node(pointer, size, alignment) on the referenced allocator object,
142 /// where \c size and \c alignment are the values of the type it was created with.
143 void operator()(value_type* pointer) noexcept
144 {
145 this->deallocate_node(pointer, derived_size_, derived_alignment_);
146 }
147
148 /// \returns The reference to the allocator.
149 /// It has the same type as the call to \ref allocator_reference::get_allocator().
150 auto get_allocator() const noexcept
151 -> decltype(std::declval<allocator_reference<allocator_type>>().get_allocator())
152 {
153 return this->allocator_reference<allocator_type>::get_allocator();
154 }
155
156 private:
157 std::size_t derived_size_, derived_alignment_;
158 };
159
160 /// Similar to \ref allocator_deallocator but calls the destructor of the object.
161 /// Otherwise behaves the same.
162 /// \ingroup adapter
163 template <typename Type, class RawAllocator>
164 class allocator_deleter : FOONATHAN_EBO(allocator_reference<RawAllocator>)
165 {
166 static_assert(!std::is_abstract<Type>::value,
167 "use allocator_polymorphic_deleter for storing base classes");
168
169 public:
170 using allocator_type = typename allocator_reference<RawAllocator>::allocator_type;
171 using value_type = Type;
172
173 /// \effects Creates it without any associated allocator.
174 /// The deleter must not be used if that is the case.
175 /// \notes This function is useful if you want to create an empty smart pointer without giving it an allocator.
176 allocator_deleter() noexcept = default;
177
178 /// \effects Creates it by passing it an \ref allocator_reference.
179 /// It will store the reference and use the referenced allocator object for the deallocation.
180 allocator_deleter(allocator_reference<RawAllocator> alloc) noexcept
181 : allocator_reference<RawAllocator>(alloc)
182 {
183 }
184
185 /// \effects Calls the destructor and deallocates the memory given to it.
186 /// Calls \c deallocate_node(pointer, sizeof(value_type), alignof(value_type))
187 /// on the referenced allocator object for the deallocation.
188 /// \requires The deleter must not have been created by the default constructor.
189 void operator()(value_type* pointer) noexcept
190 {
191 pointer->~value_type();
192 this->deallocate_node(pointer, sizeof(value_type), alignof(value_type));
193 }
194
195 /// \returns The reference to the allocator.
196 /// It has the same type as the call to \ref allocator_reference::get_allocator().
197 auto get_allocator() const noexcept
198 -> decltype(std::declval<allocator_reference<allocator_type>>().get_allocator())
199 {
200 return this->allocator_reference<allocator_type>::get_allocator();
201 }
202 };
203
204 /// Specialization of \ref allocator_deleter for array types.
205 /// Otherwise the same behavior.
206 /// \ingroup adapter
207 template <typename Type, class RawAllocator>
208 class allocator_deleter<Type[], RawAllocator>
209 : FOONATHAN_EBO(allocator_reference<RawAllocator>)
210 {
211 static_assert(!std::is_abstract<Type>::value, "must not create polymorphic arrays");
212
213 public:
214 using allocator_type = typename allocator_reference<RawAllocator>::allocator_type;
215 using value_type = Type;
216
217 /// \effects Creates it without any associated allocator.
218 /// The deleter must not be used if that is the case.
219 /// \notes This function is useful if you want to create an empty smart pointer without giving it an allocator.
220 allocator_deleter() noexcept : size_(0u) {}
221
222 /// \effects Creates it by passing it an \ref allocator_reference and the size of the array that will be deallocated.
223 /// It will store the reference to the allocator and uses the referenced allocator object for the deallocation.
224 allocator_deleter(allocator_reference<RawAllocator> alloc, std::size_t size) noexcept
225 : allocator_reference<RawAllocator>(alloc), size_(size)
226 {
227 }
228
229 /// \effects Calls the destructors and deallocates the memory given to it.
230 /// Calls \c deallocate_array(pointer, size, sizeof(value_type), alignof(value_type))
231 /// on the referenced allocator object with the size given in the constructor for the deallocation.
232 /// \requires The deleter must not have been created by the default constructor.
233 void operator()(value_type* pointer) noexcept
234 {
235 for (auto cur = pointer; cur != pointer + size_; ++cur)
236 cur->~value_type();
237 this->deallocate_array(pointer, size_, sizeof(value_type), alignof(value_type));
238 }
239
240 /// \returns The reference to the allocator.
241 /// It has the same type as the call to \ref allocator_reference::get_allocator().
242 /// \requires The deleter must not be created by the default constructor.
243 auto get_allocator() const noexcept
244 -> decltype(std::declval<allocator_reference<allocator_type>>().get_allocator())
245 {
246 return this->allocator_reference<allocator_type>::get_allocator();
247 }
248
249 /// \returns The size of the array that will be deallocated.
250 /// This is the same value as passed in the constructor, or `0` if it was created by the default constructor.
251 std::size_t array_size() const noexcept
252 {
253 return size_;
254 }
255
256 private:
257 std::size_t size_;
258 };
259
260 /// Similar to \ref allocator_polymorphic_deallocator but also calls the destructor of the object.
261 /// Otherwise behaves the same.
262 /// \note It has a relatively high space overhead, so only use it if you have to.
263 /// \ingroup adapter
264 template <typename BaseType, class RawAllocator>
265 class allocator_polymorphic_deleter : FOONATHAN_EBO(allocator_reference<RawAllocator>)
266 {
267 public:
268 using allocator_type = typename allocator_reference<RawAllocator>::allocator_type;
269 using value_type = BaseType;
270
271 /// \effects Creates it from a deleter for a derived type.
272 /// It will deallocate the memory as if done by the derived type.
273 template <typename T, FOONATHAN_REQUIRES((std::is_base_of<BaseType, T>::value))>
274 allocator_polymorphic_deleter(allocator_deleter<T, RawAllocator> deleter)
275 : allocator_reference<RawAllocator>(deleter.get_allocator()),
276 derived_size_(sizeof(T)),
277 derived_alignment_(alignof(T))
278 {
279 FOONATHAN_MEMORY_ASSERT(std::size_t(derived_size_) == sizeof(T)
280 && std::size_t(derived_alignment_) == alignof(T));
281 }
282
283 /// \effects Calls the destructor and deallocates the memory given to it.
284 /// Calls \c deallocate_node(pointer, size, alignment) on the referenced allocator object,
285 /// where \c size and \c alignment are the values of the type it was created with.
286 void operator()(value_type* pointer) noexcept
287 {
288 pointer->~value_type();
289 this->deallocate_node(pointer, derived_size_, derived_alignment_);
290 }
291
292 /// \returns The reference to the allocator.
293 /// It has the same type as the call to \ref allocator_reference::get_allocator().
294 auto get_allocator() const noexcept
295 -> decltype(std::declval<allocator_reference<allocator_type>>().get_allocator())
296 {
297 return this->allocator_reference<allocator_type>::get_allocator();
298 }
299
300 private:
301 unsigned short derived_size_,
302 derived_alignment_; // use unsigned short here to save space
303 };
304 } // namespace memory
305 } // namespace foonathan
306
307 #endif //FOONATHAN_MEMORY_DELETER_HPP_INCLUDED
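// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// A minimal sketch of how allocator_deleter can back a std::unique_ptr,
// assuming the library's heap_allocator and allocator_reference from other
// headers of this distribution. The helper name make_example_int is made up;
// the library also ships an allocate_unique() helper that automates these steps.
#include <memory>
#include <new>

#include <foonathan/memory/allocator_storage.hpp> // allocator_reference
#include <foonathan/memory/deleter.hpp>
#include <foonathan/memory/heap_allocator.hpp>

using example_deleter =
    foonathan::memory::allocator_deleter<int, foonathan::memory::heap_allocator>;

std::unique_ptr<int, example_deleter> make_example_int(int value)
{
    static foonathan::memory::heap_allocator alloc; // must outlive the pointer
    foonathan::memory::allocator_reference<foonathan::memory::heap_allocator> ref(alloc);

    auto* mem = static_cast<int*>(ref.allocate_node(sizeof(int), alignof(int)));
    ::new (static_cast<void*>(mem)) int(value); // construct in-place
    // on destruction the deleter calls ~int() and then
    // deallocate_node(mem, sizeof(int), alignof(int)) on the same allocator
    return std::unique_ptr<int, example_deleter>(mem, example_deleter(ref));
}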
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_ALIGN_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DETAIL_ALIGN_HPP_INCLUDED
6
7 #include <cstdint>
8
9 #include "../config.hpp"
10 #include "assert.hpp"
11
12 namespace foonathan
13 {
14 namespace memory
15 {
16 namespace detail
17 {
18 // whether or not an alignment is valid, i.e. a power of two and not zero
19 constexpr bool is_valid_alignment(std::size_t alignment) noexcept
20 {
21 return alignment && (alignment & (alignment - 1)) == 0u;
22 }
23
24 // returns the offset needed to align ptr for given alignment
25 // alignment must be valid
26 inline std::size_t align_offset(std::uintptr_t address, std::size_t alignment) noexcept
27 {
28 FOONATHAN_MEMORY_ASSERT(is_valid_alignment(alignment));
29 auto misaligned = address & (alignment - 1);
30 return misaligned != 0 ? (alignment - misaligned) : 0;
31 }
32 inline std::size_t align_offset(void* ptr, std::size_t alignment) noexcept
33 {
34 return align_offset(reinterpret_cast<std::uintptr_t>(ptr), alignment);
35 }
36
37 // whether or not the pointer is aligned for given alignment
38 // alignment must be valid
39 bool is_aligned(void* ptr, std::size_t alignment) noexcept;
40
41 // maximum alignment value
42 constexpr std::size_t max_alignment = alignof(std::max_align_t);
43 static_assert(is_valid_alignment(max_alignment), "ehm..?");
44
45 // returns the minimum alignment required for a node of given size
46 std::size_t alignment_for(std::size_t size) noexcept;
47 } // namespace detail
48 } // namespace memory
49 } // namespace foonathan
50
51 #endif // FOONATHAN_MEMORY_DETAIL_ALIGN_HPP_INCLUDED
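// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// A quick demonstration of the mask-based arithmetic above: since valid
// alignments are powers of two, the misalignment of an address is simply
// address & (alignment - 1), no division required.
#include <cassert>
#include <cstdint>

#include <foonathan/memory/detail/align.hpp>

void align_offset_example()
{
    using foonathan::memory::detail::align_offset;

    static_assert(foonathan::memory::detail::is_valid_alignment(16), "power of two");
    assert(align_offset(std::uintptr_t(13), 16) == 3); // next multiple of 16 is 16
    assert(align_offset(std::uintptr_t(32), 16) == 0); // already aligned
}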
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_ASSERT_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DETAIL_ASSERT_HPP_INCLUDED
6
7 #include <cstdlib>
8
9 #include "../config.hpp"
10
11 namespace foonathan
12 {
13 namespace memory
14 {
15 namespace detail
16 {
17 // handles a failed assertion
18 void handle_failed_assert(const char* msg, const char* file, int line,
19 const char* fnc) noexcept;
20
21 void handle_warning(const char* msg, const char* file, int line,
22 const char* fnc) noexcept;
23
24 // note: debug assertion macros don't use fully qualified name
25 // because they should only be used in this library, where the whole namespace is available
26 // they can be overridden via command line definitions
27 #if FOONATHAN_MEMORY_DEBUG_ASSERT && !defined(FOONATHAN_MEMORY_ASSERT)
28 #define FOONATHAN_MEMORY_ASSERT(Expr) \
29 static_cast<void>((Expr) \
30 || (detail::handle_failed_assert("Assertion \"" #Expr "\" failed", __FILE__, \
31 __LINE__, __func__), \
32 true))
33
34 #define FOONATHAN_MEMORY_ASSERT_MSG(Expr, Msg) \
35 static_cast<void>((Expr) \
36 || (detail::handle_failed_assert("Assertion \"" #Expr "\" failed: " Msg, \
37 __FILE__, __LINE__, __func__), \
38 true))
39
40 #define FOONATHAN_MEMORY_UNREACHABLE(Msg) \
41 detail::handle_failed_assert("Unreachable code reached: " Msg, __FILE__, __LINE__, __func__)
42
43 #define FOONATHAN_MEMORY_WARNING(Msg) detail::handle_warning(Msg, __FILE__, __LINE__, __func__)
44
45 #elif !defined(FOONATHAN_MEMORY_ASSERT)
46 #define FOONATHAN_MEMORY_ASSERT(Expr)
47 #define FOONATHAN_MEMORY_ASSERT_MSG(Expr, Msg)
48 #define FOONATHAN_MEMORY_UNREACHABLE(Msg) std::abort()
49 #define FOONATHAN_MEMORY_WARNING(Msg)
50 #endif
51 } // namespace detail
52 } // namespace memory
53 } // namespace foonathan
54
55 #endif // FOONATHAN_MEMORY_DETAIL_ASSERT_HPP_INCLUDED
56
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_CONTAINER_NODE_SIZES_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DETAIL_CONTAINER_NODE_SIZES_HPP_INCLUDED
6
7 #include "container_node_sizes_impl.hpp"
8
9 #endif //FOONATHAN_MEMORY_DETAIL_CONTAINER_NODE_SIZES_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DEBUG_HELPERS_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DEBUG_HELPERS_HPP_INCLUDED
6
7 #include <atomic>
8 #include <type_traits>
9
10 #include "../config.hpp"
11
12 namespace foonathan
13 {
14 namespace memory
15 {
16 enum class debug_magic : unsigned char;
17 struct allocator_info;
18
19 namespace detail
20 {
21 using debug_fill_enabled = std::integral_constant<bool, FOONATHAN_MEMORY_DEBUG_FILL>;
22 constexpr std::size_t debug_fence_size =
23 FOONATHAN_MEMORY_DEBUG_FILL ? FOONATHAN_MEMORY_DEBUG_FENCE : 0u;
24
25 #if FOONATHAN_MEMORY_DEBUG_FILL
26 // fills size bytes of memory with debug_magic
27 void debug_fill(void* memory, std::size_t size, debug_magic m) noexcept;
28
29 // returns nullptr if memory is filled with debug_magic
30 // else returns pointer to mismatched byte
31 void* debug_is_filled(void* memory, std::size_t size, debug_magic m) noexcept;
32
33 // fills fence, new and fence
34 // returns after fence
35 void* debug_fill_new(void* memory, std::size_t node_size,
36 std::size_t fence_size = debug_fence_size) noexcept;
37
38 // fills free memory and returns memory starting at fence
39 void* debug_fill_free(void* memory, std::size_t node_size,
40 std::size_t fence_size = debug_fence_size) noexcept;
41
42 // fills internal memory
43 void debug_fill_internal(void* memory, std::size_t size, bool free) noexcept;
44 #else
45 inline void debug_fill(void*, std::size_t, debug_magic) noexcept {}
46
47 inline void* debug_is_filled(void*, std::size_t, debug_magic) noexcept
48 {
49 return nullptr;
50 }
51
52 inline void* debug_fill_new(void* memory, std::size_t, std::size_t) noexcept
53 {
54 return memory;
55 }
56
57 inline void* debug_fill_free(void* memory, std::size_t, std::size_t) noexcept
58 {
59 return static_cast<char*>(memory);
60 }
61
62 inline void debug_fill_internal(void*, std::size_t, bool) noexcept {}
63 #endif
64
65 void debug_handle_invalid_ptr(const allocator_info& info, void* ptr);
66
67 // validates given ptr by evaluating the Functor
68 // if the Functor returns false, calls the invalid pointer handler
69 // note: ptr is just used as the information passed to the invalid ptr handler
70 template <class Functor>
71 void debug_check_pointer(Functor condition, const allocator_info& info, void* ptr)
72 {
73 #if FOONATHAN_MEMORY_DEBUG_POINTER_CHECK
74 if (!condition())
75 debug_handle_invalid_ptr(info, ptr);
76 #else
77 (void)ptr;
78 (void)condition;
79 (void)info;
80 #endif
81 }
82
83 // validates ptr by using a more expensive double-dealloc check
84 template <class Functor>
85 void debug_check_double_dealloc(Functor condition, const allocator_info& info,
86 void* ptr)
87 {
88 #if FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK
89 debug_check_pointer(condition, info, ptr);
90 #else
91 (void)condition;
92 (void)info;
93 (void)ptr;
94 #endif
95 }
96
97 void debug_handle_memory_leak(const allocator_info& info, std::ptrdiff_t amount);
98
99 // does no leak checking, null overhead
100 template <class Handler>
101 class no_leak_checker
102 {
103 public:
104 no_leak_checker() noexcept {}
105 no_leak_checker(no_leak_checker&&) noexcept {}
106 ~no_leak_checker() noexcept {}
107
108 no_leak_checker& operator=(no_leak_checker&&) noexcept
109 {
110 return *this;
111 }
112
113 void on_allocate(std::size_t) noexcept {}
114 void on_deallocate(std::size_t) noexcept {}
115 };
116
117 // does leak checking per-object
118 // a leak is detected in the destructor
119 template <class Handler>
120 class object_leak_checker : Handler
121 {
122 public:
123 object_leak_checker() noexcept : allocated_(0) {}
124
125 object_leak_checker(object_leak_checker&& other) noexcept
126 : allocated_(other.allocated_)
127 {
128 other.allocated_ = 0;
129 }
130
131 ~object_leak_checker() noexcept
132 {
133 if (allocated_ != 0)
134 this->operator()(allocated_);
135 }
136
137 object_leak_checker& operator=(object_leak_checker&& other) noexcept
138 {
139 allocated_ = other.allocated_;
140 other.allocated_ = 0;
141 return *this;
142 }
143
144 void on_allocate(std::size_t size) noexcept
145 {
146 allocated_ += std::ptrdiff_t(size);
147 }
148
149 void on_deallocate(std::size_t size) noexcept
150 {
151 allocated_ -= std::ptrdiff_t(size);
152 }
153
154 private:
155 std::ptrdiff_t allocated_;
156 };
157
158 // does leak checking on a global basis
159 // call macro FOONATHAN_MEMORY_GLOBAL_LEAK_CHECKER(handler, var_name) in the header
160 // when last counter gets destroyed, leak is detected
161 template <class Handler>
162 class global_leak_checker_impl
163 {
164 public:
165 struct counter : Handler
166 {
167 counter()
168 {
169 ++no_counter_objects_;
170 }
171
172 ~counter()
173 {
174 --no_counter_objects_;
175 if (no_counter_objects_ == 0u && allocated_ != 0u)
176 this->operator()(allocated_);
177 }
178 };
179
180 global_leak_checker_impl() noexcept {}
181 global_leak_checker_impl(global_leak_checker_impl&&) noexcept {}
182 ~global_leak_checker_impl() noexcept {}
183
184 global_leak_checker_impl& operator=(global_leak_checker_impl&&) noexcept
185 {
186 return *this;
187 }
188
189 void on_allocate(std::size_t size) noexcept
190 {
191 allocated_ += std::ptrdiff_t(size);
192 }
193
194 void on_deallocate(std::size_t size) noexcept
195 {
196 allocated_ -= std::ptrdiff_t(size);
197 }
198
199 private:
200 static std::atomic<std::size_t> no_counter_objects_;
201 static std::atomic<std::ptrdiff_t> allocated_;
202 };
203
204 template <class Handler>
205 std::atomic<std::size_t> global_leak_checker_impl<Handler>::no_counter_objects_(0u);
206
207 template <class Handler>
208 std::atomic<std::ptrdiff_t> global_leak_checker_impl<Handler>::allocated_(0);
209
210 #if FOONATHAN_MEMORY_DEBUG_LEAK_CHECK
211 template <class Handler>
212 using global_leak_checker = global_leak_checker_impl<Handler>;
213
214 #define FOONATHAN_MEMORY_GLOBAL_LEAK_CHECKER(handler, var_name) \
215 static foonathan::memory::detail::global_leak_checker<handler>::counter var_name;
216 #else
217 template <class Handler>
218 using global_leak_checker = no_leak_checker<int>; // only one instantiation
219
220 #define FOONATHAN_MEMORY_GLOBAL_LEAK_CHECKER(handler, var_name)
221 #endif
222
223 #if FOONATHAN_MEMORY_DEBUG_LEAK_CHECK
224 template <class Handler>
225 using default_leak_checker = object_leak_checker<Handler>;
226 #else
227 template <class Handler>
228 using default_leak_checker = no_leak_checker<Handler>;
229 #endif
230 } // namespace detail
231 } // namespace memory
232 } // namespace foonathan
233
234 #endif // FOONATHAN_MEMORY_DEBUG_HELPERS_HPP_INCLUDED
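// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// A sketch of how the leak-checker policies above are composed: the Handler is
// a functor invoked with the net number of leaked bytes. The names
// example_leak_handler and leak_checker_example are made up; the library's
// allocator classes wire this up internally.
#include <cstddef>
#include <cstdio>

#include <foonathan/memory/detail/debug_helpers.hpp>

struct example_leak_handler
{
    void operator()(std::ptrdiff_t leaked) noexcept
    {
        std::fprintf(stderr, "leaked %td bytes\n", leaked);
    }
};

void leak_checker_example()
{
    // resolves to object_leak_checker or no_leak_checker depending on
    // FOONATHAN_MEMORY_DEBUG_LEAK_CHECK, with zero overhead when disabled
    foonathan::memory::detail::default_leak_checker<example_leak_handler> checker;
    checker.on_allocate(64);
    checker.on_deallocate(64); // balanced, so the destructor reports nothing
}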
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_EBO_STORAGE_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DETAIL_EBO_STORAGE_HPP_INCLUDED
6
7 #include "utility.hpp"
8 #include "../config.hpp"
9
10 namespace foonathan
11 {
12 namespace memory
13 {
14 namespace detail
15 {
16 template <int Tag, typename T>
17 class ebo_storage : T
18 {
19 protected:
20 ebo_storage(const T& t) : T(t) {}
21
22 ebo_storage(T&& t) noexcept(std::is_nothrow_move_constructible<T>::value)
23 : T(detail::move(t))
24 {
25 }
26
27 T& get() noexcept
28 {
29 return *this;
30 }
31
32 const T& get() const noexcept
33 {
34 return *this;
35 }
36 };
37 } // namespace detail
38 } // namespace memory
39 } // namespace foonathan
40
41 #endif // FOONATHAN_MEMORY_DETAIL_EBO_STORAGE_HPP_INCLUDED
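// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// What ebo_storage buys: deriving from an empty type lets the compiler apply
// the empty base optimization, so storing a stateless allocator or functor
// costs no space. The types below are made up for illustration.
#include <foonathan/memory/detail/ebo_storage.hpp>

struct example_empty_functor
{
    void operator()() {}
};

class example_holder
: foonathan::memory::detail::ebo_storage<0, example_empty_functor>
{
    using storage = foonathan::memory::detail::ebo_storage<0, example_empty_functor>;

public:
    example_holder() : storage(example_empty_functor{}) {}

    int value = 0;
};

// on common ABIs the empty base contributes no size at all:
static_assert(sizeof(example_holder) == sizeof(int), "empty base optimization expected");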
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAILL_FREE_LIST_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DETAILL_FREE_LIST_HPP_INCLUDED
6
7 #include <cstddef>
8 #include <cstdint>
9
10 #include "align.hpp"
11 #include "utility.hpp"
12 #include "../config.hpp"
13
14 namespace foonathan
15 {
16 namespace memory
17 {
18 namespace detail
19 {
20 // stores free blocks for a memory pool
21 // memory blocks are fragmented and stored in a list
22 // debug: fills memory and uses a bigger node_size for fence memory
23 class free_memory_list
24 {
25 public:
26 // minimum element size
27 static constexpr auto min_element_size = sizeof(char*);
28 // alignment
29 static constexpr auto min_element_alignment = alignof(char*);
30
31 //=== constructor ===//
32 free_memory_list(std::size_t node_size) noexcept;
33
34 // calls other constructor plus insert
35 free_memory_list(std::size_t node_size, void* mem,
36 std::size_t size) noexcept;
37
38 free_memory_list(free_memory_list&& other) noexcept;
39 ~free_memory_list() noexcept = default;
40
41 free_memory_list& operator=(free_memory_list&& other) noexcept;
42
43 friend void swap(free_memory_list& a, free_memory_list& b) noexcept;
44
45 //=== insert/allocation/deallocation ===//
46 // inserts a new memory block, by splitting it up and setting the links
47 // does not own memory!
48 // mem must be aligned for alignment()
49 // pre: size != 0
50 void insert(void* mem, std::size_t size) noexcept;
51
52 // returns the usable size
53 // i.e. how much memory will actually be inserted and be usable after a call to insert()
54 std::size_t usable_size(std::size_t size) const noexcept
55 {
56 return size;
57 }
58
59 // returns a single block from the list
60 // pre: !empty()
61 void* allocate() noexcept;
62
63 // returns a memory block big enough for n bytes
64 // might fail even if capacity is sufficient
65 void* allocate(std::size_t n) noexcept;
66
67 // deallocates a single block
68 void deallocate(void* ptr) noexcept;
69
70 // deallocates multiple blocks with n bytes total
71 void deallocate(void* ptr, std::size_t n) noexcept;
72
73 //=== getter ===//
74 std::size_t node_size() const noexcept
75 {
76 return node_size_;
77 }
78
79 // alignment of all nodes
80 std::size_t alignment() const noexcept;
81
82 // number of nodes remaining
83 std::size_t capacity() const noexcept
84 {
85 return capacity_;
86 }
87
88 bool empty() const noexcept
89 {
90 return first_ == nullptr;
91 }
92
93 private:
94 std::size_t fence_size() const noexcept;
95 void insert_impl(void* mem, std::size_t size) noexcept;
96
97 char* first_;
98 std::size_t node_size_, capacity_;
99 };
100
101 void swap(free_memory_list& a, free_memory_list& b) noexcept;
102
103 // same as above but keeps the nodes ordered
104 // this allows array allocations, that is, consecutive nodes
105 // debug: fills memory and uses a bigger node_size for fence memory
106 class ordered_free_memory_list
107 {
108 public:
109 // minimum element size
110 static constexpr auto min_element_size = sizeof(char*);
111 // alignment
112 static constexpr auto min_element_alignment = alignof(char*);
113
114 //=== constructor ===//
115 ordered_free_memory_list(std::size_t node_size) noexcept;
116
117 ordered_free_memory_list(std::size_t node_size, void* mem,
118 std::size_t size) noexcept
119 : ordered_free_memory_list(node_size)
120 {
121 insert(mem, size);
122 }
123
124 ordered_free_memory_list(ordered_free_memory_list&& other) noexcept;
125
126 ~ordered_free_memory_list() noexcept = default;
127
128 ordered_free_memory_list& operator=(ordered_free_memory_list&& other)
129 noexcept
130 {
131 ordered_free_memory_list tmp(detail::move(other));
132 swap(*this, tmp);
133 return *this;
134 }
135
136 friend void swap(ordered_free_memory_list& a,
137 ordered_free_memory_list& b) noexcept;
138
139 //=== insert/allocation/deallocation ===//
140 // inserts a new memory block, by splitting it up and setting the links
141 // does not own memory!
142 // mem must be aligned for alignment()
143 // pre: size != 0
144 void insert(void* mem, std::size_t size) noexcept;
145
146 // returns the usable size
147 // i.e. how much memory will actually be inserted and be usable after a call to insert()
148 std::size_t usable_size(std::size_t size) const noexcept
149 {
150 return size;
151 }
152
153 // returns a single block from the list
154 // pre: !empty()
155 void* allocate() noexcept;
156
157 // returns a memory block big enough for n bytes (!, not nodes)
158 // might fail even if capacity is sufficient
159 void* allocate(std::size_t n) noexcept;
160
161 // deallocates a single block
162 void deallocate(void* ptr) noexcept;
163
164 // deallocates multiple blocks with n bytes total
165 void deallocate(void* ptr, std::size_t n) noexcept;
166
167 //=== getter ===//
168 std::size_t node_size() const noexcept
169 {
170 return node_size_;
171 }
172
173 // alignment of all nodes
174 std::size_t alignment() const noexcept;
175
176 // number of nodes remaining
177 std::size_t capacity() const noexcept
178 {
179 return capacity_;
180 }
181
182 bool empty() const noexcept
183 {
184 return capacity_ == 0u;
185 }
186
187 private:
188 std::size_t fence_size() const noexcept;
189
190 // returns previous pointer
191 char* insert_impl(void* mem, std::size_t size) noexcept;
192
193 char* begin_node() noexcept;
194 char* end_node() noexcept;
195
196 std::uintptr_t begin_proxy_, end_proxy_;
197 std::size_t node_size_, capacity_;
198 char * last_dealloc_, *last_dealloc_prev_;
199 };
200
201 void swap(ordered_free_memory_list& a, ordered_free_memory_list& b) noexcept;
202
203 #if FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK
204 // use ordered version to allow pointer check
205 using node_free_memory_list = ordered_free_memory_list;
206 using array_free_memory_list = ordered_free_memory_list;
207 #else
208 using node_free_memory_list = free_memory_list;
209 using array_free_memory_list = ordered_free_memory_list;
210 #endif
211 } // namespace detail
212 } // namespace memory
213 } // namespace foonathan
214
215 #endif // FOONATHAN_MEMORY_DETAILL_FREE_LIST_HPP_INCLUDED
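// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// The core trick behind free_memory_list, in miniature (not the library's
// actual implementation): a free list needs no extra bookkeeping memory
// because every free node stores the pointer to the next free node inside
// itself. That is also why min_element_size above is sizeof(char*).
#include <cstring>

namespace free_list_example
{
    inline char* get_next(void* node) // read the link embedded in the node
    {
        char* next;
        std::memcpy(&next, node, sizeof(char*));
        return next;
    }

    inline void set_next(void* node, char* next) // write the embedded link
    {
        std::memcpy(node, &next, sizeof(char*));
    }

    inline void* pop(char*& first) // allocate: unlink the head node
    {
        auto node = first;
        first = get_next(node);
        return node;
    }

    inline void push(char*& first, void* node) // deallocate: relink in front
    {
        set_next(node, first);
        first = static_cast<char*>(node);
    }
} // namespace free_list_example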
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_FREE_LIST_ARRAY_HPP
5 #define FOONATHAN_MEMORY_DETAIL_FREE_LIST_ARRAY_HPP
6
7 #include "align.hpp"
8 #include "assert.hpp"
9 #include "memory_stack.hpp"
10 #include "../config.hpp"
11
12 namespace foonathan
13 {
14 namespace memory
15 {
16 namespace detail
17 {
18 // an array of free_memory_list types
19 // indexed via size, AccessPolicy does necessary conversions
20 // requires trivially destructible FreeList type
21 template <class FreeList, class AccessPolicy>
22 class free_list_array
23 {
24 // not supported on GCC 4.7
25 //static_assert(std::is_trivially_destructible<FreeList>::value,
26 // "free list must be trivially destructible");
27 public:
28 // creates sufficient elements to support up to given maximum node size
29 // all lists are initially empty
30 // actual number is calculated via policy
31 // memory is taken from fixed_memory_stack, it must be sufficient
32 free_list_array(fixed_memory_stack& stack, const char* end,
33 std::size_t max_node_size) noexcept
34 : no_elements_(AccessPolicy::index_from_size(max_node_size) - min_size_index + 1)
35 {
36 array_ = static_cast<FreeList*>(
37 stack.allocate(end, no_elements_ * sizeof(FreeList), alignof(FreeList)));
38 FOONATHAN_MEMORY_ASSERT_MSG(array_, "insufficient memory for free lists");
39 for (std::size_t i = 0u; i != no_elements_; ++i)
40 {
41 auto node_size = AccessPolicy::size_from_index(i + min_size_index);
42 ::new (static_cast<void*>(array_ + i)) FreeList(node_size);
43 }
44 }
45
46 // move constructor, does not actually move the elements, just the pointer
47 free_list_array(free_list_array&& other) noexcept
48 : array_(other.array_), no_elements_(other.no_elements_)
49 {
50 other.array_ = nullptr;
51 other.no_elements_ = 0u;
52 }
53
54 // destructor, does nothing, list must be trivially destructible!
55 ~free_list_array() noexcept = default;
56
57 free_list_array& operator=(free_list_array&& other) noexcept
58 {
59 array_ = other.array_;
60 no_elements_ = other.no_elements_;
61
62 other.array_ = nullptr;
63 other.no_elements_ = 0u;
64 return *this;
65 }
66
67 // access free list for given size
68 FreeList& get(std::size_t node_size) const noexcept
69 {
70 auto i = AccessPolicy::index_from_size(node_size);
71 if (i < min_size_index)
72 i = min_size_index;
73 return array_[i - min_size_index];
74 }
75
76 // number of free lists
77 std::size_t size() const noexcept
78 {
79 return no_elements_;
80 }
81
82 // maximum supported node size
83 std::size_t max_node_size() const noexcept
84 {
85 return AccessPolicy::size_from_index(no_elements_ + min_size_index - 1);
86 }
87
88 private:
89 static const std::size_t min_size_index;
90
91 FreeList* array_;
92 std::size_t no_elements_;
93 };
94
95 template <class FL, class AP>
96 const std::size_t free_list_array<FL, AP>::min_size_index =
97 AP::index_from_size(FL::min_element_size);
98
99 // AccessPolicy that maps size to indices 1:1
100 // creates a free list for each size!
101 struct identity_access_policy
102 {
103 static std::size_t index_from_size(std::size_t size) noexcept
104 {
105 return size;
106 }
107
108 static std::size_t size_from_index(std::size_t index) noexcept
109 {
110 return index;
111 }
112 };
113
114 // AccessPolicy that maps sizes to the integral log2
115 // this groups several sizes into one free list and never wastes more than half the size
116 struct log2_access_policy
117 {
118 static std::size_t index_from_size(std::size_t size) noexcept;
119 static std::size_t size_from_index(std::size_t index) noexcept;
120 };
121 } // namespace detail
122 } // namespace memory
123 } // namespace foonathan
124
125 #endif //FOONATHAN_MEMORY_DETAIL_FREE_LIST_ARRAY_HPP
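// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// The AccessPolicy interface that free_list_array expects, shown with a
// made-up policy that lets two adjacent sizes share one list. The only
// requirements visible above are the two static functions and that
// size_from_index(index_from_size(s)) >= s.
#include <cstddef>

struct example_even_size_policy
{
    // sizes 1/2 map to index 1, sizes 3/4 to index 2, ...
    static std::size_t index_from_size(std::size_t size) noexcept
    {
        return (size + 1) / 2;
    }

    // index 1 holds 2-byte nodes, index 2 holds 4-byte nodes, ...
    static std::size_t size_from_index(std::size_t index) noexcept
    {
        return 2 * index;
    }
};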
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_ILOG2_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DETAIL_ILOG2_HPP_INCLUDED
6
7 #include <climits>
8 #include <cstdint>
9
10 #include "config.hpp"
11
12 namespace foonathan
13 {
14 namespace memory
15 {
16 namespace detail
17 {
18 // undefined for 0
19 template <typename UInt>
20 constexpr bool is_power_of_two(UInt x)
21 {
22 return (x & (x - 1)) == 0;
23 }
24
25 inline std::size_t ilog2_base(std::uint64_t x)
26 {
27 #if defined(__GNUC__)
28 unsigned long long value = x;
29 return sizeof(value) * CHAR_BIT - __builtin_clzll(value);
30 #else
31 // Adapted from https://stackoverflow.com/a/40943402
32 std::size_t clz = 64;
33 std::size_t c = 32;
34 do
35 {
36 auto tmp = x >> c;
37 if (tmp != 0)
38 {
39 clz -= c;
40 x = tmp;
41 }
42 c = c >> 1;
43 } while (c != 0);
44 clz -= x ? 1 : 0;
45
46 return 64 - clz;
47 #endif
48 }
49
50 // ilog2() implementation, truncating the fractional part (rounds down)
51 // e.g. 1 -> 0, 2 -> 1, 3 -> 1, 4 -> 2, 5 -> 2
52 inline std::size_t ilog2(std::uint64_t x)
53 {
54 return ilog2_base(x) - 1;
55 }
56
57 // ceiling ilog2() implementation, rounding up if there is a fractional part
58 // e.g. 1 -> 0, 2 -> 1, 3 -> 2, 4 -> 2, 5 -> 3
59 inline std::size_t ilog2_ceil(std::uint64_t x)
60 {
61 // only subtract one if power of two
62 return ilog2_base(x) - std::size_t(is_power_of_two(x));
63 }
64 } // namespace detail
65 } // namespace memory
66 } // namespace foonathan
67
68 #endif
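// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// Sanity checks matching the comments above: ilog2() rounds down, ilog2_ceil()
// rounds up, and the two agree exactly on powers of two.
#include <cassert>

void ilog2_example()
{
    using foonathan::memory::detail::ilog2;
    using foonathan::memory::detail::ilog2_ceil;

    assert(ilog2(1) == 0 && ilog2(2) == 1 && ilog2(3) == 1 && ilog2(4) == 2);
    assert(ilog2_ceil(1) == 0 && ilog2_ceil(3) == 2 && ilog2_ceil(4) == 2
           && ilog2_ceil(5) == 3);
}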
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_LOWLEVEL_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DETAIL_LOWLEVEL_ALLOCATOR_HPP_INCLUDED
6
7 #include <type_traits>
8
9 #include "../config.hpp"
10 #include "../error.hpp"
11 #include "align.hpp"
12 #include "debug_helpers.hpp"
13 #include "assert.hpp"
14
15 namespace foonathan
16 {
17 namespace memory
18 {
19 namespace detail
20 {
21 template <class Functor>
22 struct lowlevel_allocator_leak_handler
23 {
24 void operator()(std::ptrdiff_t amount)
25 {
26 debug_handle_memory_leak(Functor::info(), amount);
27 }
28 };
29
30 // Functor controls low-level allocation:
31 // static allocator_info info()
32 // static void* allocate(std::size_t size, std::size_t alignment);
33 // static void deallocate(void *memory, std::size_t size, std::size_t alignment);
34 // static std::size_t max_node_size();
35 template <class Functor>
36 class lowlevel_allocator : global_leak_checker<lowlevel_allocator_leak_handler<Functor>>
37 {
38 public:
39 using is_stateful = std::false_type;
40
41 lowlevel_allocator() noexcept
42 {
43 }
44 lowlevel_allocator(lowlevel_allocator&&) noexcept
45 {
46 }
47 ~lowlevel_allocator() noexcept
48 {
49 }
50
51 lowlevel_allocator& operator=(lowlevel_allocator&&) noexcept
52 {
53 return *this;
54 }
55
56 void* allocate_node(std::size_t size, std::size_t alignment)
57 {
58 auto actual_size = size + (debug_fence_size ? 2 * max_alignment : 0u);
59
60 auto memory = Functor::allocate(actual_size, alignment);
61 if (!memory)
62 FOONATHAN_THROW(out_of_memory(Functor::info(), actual_size));
63
64 this->on_allocate(actual_size);
65
66 return debug_fill_new(memory, size, max_alignment);
67 }
68
69 void deallocate_node(void* node, std::size_t size,
70 std::size_t alignment) noexcept
71 {
72 auto actual_size = size + (debug_fence_size ? 2 * max_alignment : 0u);
73
74 auto memory = debug_fill_free(node, size, max_alignment);
75 Functor::deallocate(memory, actual_size, alignment);
76
77 this->on_deallocate(actual_size);
78 }
79
80 std::size_t max_node_size() const noexcept
81 {
82 return Functor::max_node_size();
83 }
84 };
85
86 #define FOONATHAN_MEMORY_LL_ALLOCATOR_LEAK_CHECKER(functor, var_name) \
87 FOONATHAN_MEMORY_GLOBAL_LEAK_CHECKER(lowlevel_allocator_leak_handler<functor>, var_name)
88 } // namespace detail
89 } // namespace memory
90 } // namespace foonathan
91
92 #endif // FOONATHAN_MEMORY_DETAIL_LOWLEVEL_ALLOCATOR_HPP_INCLUDED
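// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// A made-up Functor satisfying the interface documented above, backing
// lowlevel_allocator with std::malloc. The library's real malloc-based functor
// lives elsewhere and additionally deals with platform quirks.
#include <cstddef>
#include <cstdlib>

#include <foonathan/memory/error.hpp> // allocator_info

struct example_malloc_functor
{
    static foonathan::memory::allocator_info info() noexcept
    {
        return {"example_malloc_functor", nullptr};
    }

    // assumes alignment <= alignof(std::max_align_t), which malloc guarantees
    static void* allocate(std::size_t size, std::size_t) noexcept
    {
        return std::malloc(size);
    }

    static void deallocate(void* memory, std::size_t, std::size_t) noexcept
    {
        std::free(memory);
    }

    static std::size_t max_node_size() noexcept
    {
        return std::size_t(-1);
    }
};

using example_malloc_allocator =
    foonathan::memory::detail::lowlevel_allocator<example_malloc_functor>;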
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_MEMORY_STACK_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DETAIL_MEMORY_STACK_HPP_INCLUDED
6
7 #include <cstddef>
8
9 #include "../config.hpp"
10 #include "align.hpp"
11 #include "debug_helpers.hpp"
12 #include "../debugging.hpp"
13
14 namespace foonathan
15 {
16 namespace memory
17 {
18 namespace detail
19 {
20 // simple memory stack implementation that does not support growing
21 class fixed_memory_stack
22 {
23 public:
24 fixed_memory_stack() noexcept : fixed_memory_stack(nullptr)
25 {
26 }
27
28 // gives it the current pointer, the end pointer must be maintained separately
29 explicit fixed_memory_stack(void* memory) noexcept
30 : cur_(static_cast<char*>(memory))
31 {
32 }
33
34 fixed_memory_stack(fixed_memory_stack&& other) noexcept : cur_(other.cur_)
35 {
36 other.cur_ = nullptr;
37 }
38
39 ~fixed_memory_stack() noexcept = default;
40
41 fixed_memory_stack& operator=(fixed_memory_stack&& other) noexcept
42 {
43 cur_ = other.cur_;
44 other.cur_ = nullptr;
45 return *this;
46 }
47
48 // bumps the top pointer without filling it
49 void bump(std::size_t offset) noexcept
50 {
51 cur_ += offset;
52 }
53
54 // bumps the top pointer by offset and fills
55 void bump(std::size_t offset, debug_magic m) noexcept
56 {
57 detail::debug_fill(cur_, offset, m);
58 bump(offset);
59 }
60
61 // same as bump(offset, m) but returns old value
62 void* bump_return(std::size_t offset,
63 debug_magic m = debug_magic::new_memory) noexcept
64 {
65 auto memory = cur_;
66 detail::debug_fill(memory, offset, m);
67 cur_ += offset;
68 return memory;
69 }
70
71 // allocates memory by advancing the stack, returns nullptr if insufficient
72 // debug: mark memory as new_memory, put fence in front and back
73 void* allocate(const char* end, std::size_t size, std::size_t alignment,
74 std::size_t fence_size = debug_fence_size) noexcept
75 {
76 if (cur_ == nullptr)
77 return nullptr;
78
79 auto remaining = std::size_t(end - cur_);
80 auto offset = align_offset(cur_ + fence_size, alignment);
81 if (fence_size + offset + size + fence_size > remaining)
82 return nullptr;
83
84 return allocate_unchecked(size, offset, fence_size);
85 }
86
87 // same as allocate() but does not check the size
88 // note: pass it the align OFFSET, not the alignment
89 void* allocate_unchecked(std::size_t size, std::size_t align_offset,
90 std::size_t fence_size = debug_fence_size)
91 noexcept
92 {
93 bump(fence_size, debug_magic::fence_memory);
94 bump(align_offset, debug_magic::alignment_memory);
95 auto mem = bump_return(size);
96 bump(fence_size, debug_magic::fence_memory);
97 return mem;
98 }
99
100 // unwinds the stack to a certain older position
101 // debug: marks memory from new top to old top as freed
102 // doesn't check for invalid pointer
103 void unwind(char* top) noexcept
104 {
105 debug_fill(top, std::size_t(cur_ - top), debug_magic::freed_memory);
106 cur_ = top;
107 }
108
109 // returns the current top
110 char* top() const noexcept
111 {
112 return cur_;
113 }
114
115 private:
116 char* cur_;
117 };
118 } // namespace detail
119 } // namespace memory
120 } // namespace foonathan
121
122 #endif // FOONATHAN_MEMORY_DETAIL_MEMORY_STACK_HPP_INCLUDED
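// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// fixed_memory_stack in action over a caller-provided buffer. Allocation is a
// pure pointer bump; unwind() releases everything allocated after a saved top
// in one step. The function name is made up.
#include <foonathan/memory/detail/memory_stack.hpp>

void fixed_stack_example()
{
    alignas(16) char buffer[256];
    const char* end = buffer + sizeof(buffer);

    foonathan::memory::detail::fixed_memory_stack stack(buffer);

    void* a = stack.allocate(end, 64, 16); // 64 bytes, 16-byte aligned
    char* marker = stack.top();            // remember the current top
    void* b = stack.allocate(end, 32, 8);

    stack.unwind(marker); // releases b (and everything after the marker)
    (void)a;
    (void)b;
}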
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_SMALL_FREE_LIST_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_DETAIL_SMALL_FREE_LIST_HPP_INCLUDED
6
7 #include <cstddef>
8
9 #include "../config.hpp"
10 #include "utility.hpp"
11
12 namespace foonathan
13 {
14 namespace memory
15 {
16 namespace detail
17 {
18 struct chunk_base
19 {
20 chunk_base* prev = this;
21 chunk_base* next = this;
22
23 unsigned char first_free = 0; // first free node for the linked list
24 unsigned char capacity = 0; // total number of free nodes available
25 unsigned char no_nodes = 0; // total number of nodes in memory
26
27 chunk_base() noexcept = default;
28
29 chunk_base(unsigned char no) noexcept : capacity(no), no_nodes(no)
30 {
31 }
32 };
33
34 struct chunk;
35
36 // the same as free_memory_list but optimized for small node sizes
37 // it is slower and does not support arrays
38 // but has very small overhead
39 // debug: allocate() and deallocate() mark memory as new and freed, respectively
40 // node_size is increased by two times the fence size and a fence is put in front and behind
41 class small_free_memory_list
42 {
43 public:
44 // minimum element size
45 static constexpr std::size_t min_element_size = 1;
46 // alignment
47 static constexpr std::size_t min_element_alignment = 1;
48
49 //=== constructor ===//
50 small_free_memory_list(std::size_t node_size) noexcept;
51
52 // does not own memory!
53 small_free_memory_list(std::size_t node_size, void* mem,
54 std::size_t size) noexcept;
55
56 small_free_memory_list(small_free_memory_list&& other) noexcept;
57
58 ~small_free_memory_list() noexcept = default;
59
60 small_free_memory_list& operator=(small_free_memory_list&& other) noexcept
61 {
62 small_free_memory_list tmp(detail::move(other));
63 swap(*this, tmp);
64 return *this;
65 }
66
67 friend void swap(small_free_memory_list& a,
68 small_free_memory_list& b) noexcept;
69
70 //=== insert/alloc/dealloc ===//
71 // inserts new memory of given size into the free list
72 // mem must be aligned for maximum alignment
73 void insert(void* mem, std::size_t size) noexcept;
74
75 // returns the usable size
76 // i.e. how much memory will actually be inserted and be usable after a call to insert()
77 std::size_t usable_size(std::size_t size) const noexcept;
78
79 // allocates a node big enough for the node size
80 // pre: !empty()
81 void* allocate() noexcept;
82
83 // always returns nullptr, because array allocations are not supported
84 void* allocate(std::size_t) noexcept
85 {
86 return nullptr;
87 }
88
89 // deallocates the node previously allocated via allocate()
90 void deallocate(void* node) noexcept;
91
92 // forwards to insert()
93 void deallocate(void* mem, std::size_t size) noexcept
94 {
95 insert(mem, size);
96 }
97
98 // hint for allocate() to be prepared to allocate n nodes
99 // it searches for a chunk that has n nodes free
100 // returns false if there is no such chunk
101 // never fails for n == 1 if not empty()
102 // pre: capacity() >= n * node_size()
103 bool find_chunk(std::size_t n) noexcept
104 {
105 return find_chunk_impl(n) != nullptr;
106 }
107
108 //=== getter ===//
109 std::size_t node_size() const noexcept
110 {
111 return node_size_;
112 }
113
114 // the alignment of all nodes
115 std::size_t alignment() const noexcept;
116
117 // number of nodes remaining
118 std::size_t capacity() const noexcept
119 {
120 return capacity_;
121 }
122
123 bool empty() const noexcept
124 {
125 return capacity_ == 0u;
126 }
127
128 private:
129 std::size_t fence_size() const noexcept;
130
131 chunk* find_chunk_impl(std::size_t n = 1) noexcept;
132 chunk* find_chunk_impl(unsigned char* node, chunk_base* first,
133 chunk_base* last) noexcept;
134 chunk* find_chunk_impl(unsigned char* node) noexcept;
135
136 chunk_base base_;
137 std::size_t node_size_, capacity_;
138 chunk_base *alloc_chunk_, *dealloc_chunk_;
139 };
140
141 // for some reason, this is required in order to define it
142 void swap(small_free_memory_list& a, small_free_memory_list& b) noexcept;
143 } // namespace detail
144 } // namespace memory
145 } // namespace foonathan
146
147 #endif // FOONATHAN_MEMORY_DETAIL_SMALL_FREE_LIST_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_DETAIL_UTILITY_HPP
5 #define FOONATHAN_MEMORY_DETAIL_UTILITY_HPP
6
7 // implementation of some functions from <utility> to prevent dependencies on it
8
9 #include <type_traits>
10
11 #include "../config.hpp"
12
13 #if FOONATHAN_HOSTED_IMPLEMENTATION
14 #include <utility>
15 #endif
16
17 namespace foonathan
18 {
19 namespace memory
20 {
21 namespace detail
22 {
23 // move - taken from http://stackoverflow.com/a/7518365
24 template <typename T>
25 typename std::remove_reference<T>::type&& move(T&& arg) noexcept
26 {
27 return static_cast<typename std::remove_reference<T>::type&&>(arg);
28 }
29 // forward - taken from http://stackoverflow.com/a/27501759
30 template <class T>
31 T&& forward(typename std::remove_reference<T>::type& t) noexcept
32 {
33 return static_cast<T&&>(t);
34 }
35 template <class T>
36 T&& forward(typename std::remove_reference<T>::type&& t) noexcept
37 {
38 static_assert(!std::is_lvalue_reference<T>::value,
39 "Can not forward an rvalue as an lvalue.");
40 return static_cast<T&&>(t);
41 }
42
43 namespace swap_
44 {
45 #if FOONATHAN_HOSTED_IMPLEMENTATION
46 using std::swap;
47 #else
48 template <typename T>
49 void swap(T& a, T& b)
50 {
51 T tmp = move(a);
52 a = move(b);
53 b = move(tmp);
54 }
55 #endif
56 } // namespace swap_
57
58 // ADL aware swap
59 template <typename T>
60 void adl_swap(T& a, T& b) noexcept
61 {
62 using swap_::swap;
63 swap(a, b);
64 }
65
66 // fancier syntax for enable_if
67 // used as (template) parameter
68 // also useful for doxygen
69 // define PREDEFINED: FOONATHAN_REQUIRES(x):=
70 #define FOONATHAN_REQUIRES(Expr) typename std::enable_if<(Expr), int>::type = 0
71
72 // same as above, but as return type
73 // also useful for doxygen:
74 // defined PREDEFINED: FOONATHAN_REQUIRES_RET(x,r):=r
75 #define FOONATHAN_REQUIRES_RET(Expr, ...) typename std::enable_if<(Expr), __VA_ARGS__>::type
76
77 // fancier syntax for enable_if on non-templated member function
78 #define FOONATHAN_ENABLE_IF(Expr) \
79 template <typename Dummy = std::true_type, FOONATHAN_REQUIRES(Dummy::value && (Expr))>
80
81 // fancier syntax for general expression SFINAE
82 // used as (template) parameter
83 // also useful for doxygen:
84 // define PREDEFINED: FOONATHAN_SFINAE(x):=
85 #define FOONATHAN_SFINAE(Expr) decltype((Expr), int()) = 0
86
87 // avoids code repetition for one-line forwarding functions
88 #define FOONATHAN_AUTO_RETURN(Expr) \
89 decltype(Expr) \
90 { \
91 return Expr; \
92 }
93
94 // same as above, but requires certain type
95 #define FOONATHAN_AUTO_RETURN_TYPE(Expr, T) \
96 decltype(Expr) \
97 { \
98 static_assert(std::is_same<decltype(Expr), T>::value, \
99 #Expr " does not have the return type " #T); \
100 return Expr; \
101 }
102
103 // whether or not a type is an instantiation of a template
104 template <template <typename...> class Template, typename T>
105 struct is_instantiation_of : std::false_type
106 {
107 };
108
109 template <template <typename...> class Template, typename... Args>
110 struct is_instantiation_of<Template, Template<Args...>> : std::true_type
111 {
112 };
113 } // namespace detail
114 } // namespace memory
115 } // namespace foonathan
116
117 #endif //FOONATHAN_MEMORY_DETAIL_UTILITY_HPP
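// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// The SFINAE helpers above in use; the names example_twice and example_box are
// made up for illustration.
#include <type_traits>

#include <foonathan/memory/detail/utility.hpp>

// participates in overload resolution only for integral T:
template <typename T, FOONATHAN_REQUIRES(std::is_integral<T>::value)>
T example_twice(T value) noexcept
{
    return static_cast<T>(value * 2);
}

// is_instantiation_of detects whether a type is some specialization of a template:
template <typename T>
struct example_box
{
    T value;
};

static_assert(foonathan::memory::detail::is_instantiation_of<example_box,
                                                             example_box<int>>::value,
              "example_box<int> is an instantiation of example_box");
static_assert(!foonathan::memory::detail::is_instantiation_of<example_box, int>::value,
              "int is not an instantiation of example_box");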
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 /// \file
5 /// The exception classes.
6
7 #ifndef FOONATHAN_MEMORY_ERROR_HPP_INCLUDED
8 #define FOONATHAN_MEMORY_ERROR_HPP_INCLUDED
9
10 #include <cstddef>
11 #include <new>
12
13 #include "config.hpp"
14
15 namespace foonathan
16 {
17 namespace memory
18 {
19 /// Contains information about an allocator.
20 /// It can be used for logging in the various handler functions.
21 /// \ingroup core
22 struct allocator_info
23 {
24 /// The name of the allocator.
25 /// It is a NTBS whose lifetime is not managed by this object,
26 /// it must be stored elsewhere or be a string literal.
27 const char* name;
28
29 /// A pointer representing an allocator.
30 /// It does not necessarily point to the beginning of the allocator object,
31 /// the only guarantee is that different allocator objects result in a different pointer value.
32 /// For stateless allocators it is sometimes \c nullptr.
33 /// \note The pointer must not be cast back to any allocator type.
34 const void* allocator;
35
36 /// \effects Creates it by giving it the name of the allocator and a pointer.
37 constexpr allocator_info(const char* n, const void* alloc) noexcept
38 : name(n), allocator(alloc)
39 {
40 }
41
42 /// @{
43 /// \effects Compares two \ref allocator_info objects; they are equal if the \ref allocator is the same.
44 /// \returns The result of the comparison.
45 friend constexpr bool operator==(const allocator_info& a,
46 const allocator_info& b) noexcept
47 {
48 return a.allocator == b.allocator;
49 }
50
51 friend constexpr bool operator!=(const allocator_info& a,
52 const allocator_info& b) noexcept
53 {
54 return a.allocator != b.allocator;
55 }
56 /// @}
57 };
58
59 /// The exception class thrown when a low level allocator runs out of memory.
60 /// It is derived from \c std::bad_alloc.
61 /// This can happen if a low level allocation function like \c std::malloc() runs out of memory.
62 /// Throwing can be prohibited by the handler function.
63 /// \ingroup core
64 class out_of_memory : public std::bad_alloc
65 {
66 public:
67 /// The type of the handler called in the constructor of \ref out_of_memory.
68 /// When an out of memory situation is encountered and the exception class created,
69 /// this handler gets called.
70 /// It is especially useful if exception support is disabled.
71 /// It gets the \ref allocator_info and the amount of memory that could not be allocated.
72 /// \requiredbe It can log the error, throw a different exception derived from \c std::bad_alloc or abort the program.
73 /// If it returns, this exception object will be created and thrown.
74 /// \defaultbe On a hosted implementation it logs the error on \c stderr and continues execution,
75 /// leading to this exception being thrown.
76 /// On a freestanding implementation it does nothing.
77 /// \note It is different from \c std::new_handler; it will not be called repeatedly until the allocation succeeds.
78 /// Its only job is to report the error.
79 using handler = void (*)(const allocator_info& info, std::size_t amount);
80
81 /// \effects Sets \c h as the new \ref handler in an atomic operation.
82 /// A \c nullptr sets the default \ref handler.
83 /// \returns The previous \ref handler. This is never \c nullptr.
84 static handler set_handler(handler h);
85
86 /// \returns The current \ref handler. This is never \c nullptr.
87 static handler get_handler();
88
89 /// \effects Creates it by passing it the \ref allocator_info and the amount of memory failed to be allocated.
90 /// It also calls the \ref handler to control whether or not it will be thrown.
91 out_of_memory(const allocator_info& info, std::size_t amount);
92
93 /// \returns A static NTBS that describes the error.
94 /// It does not contain any specific information since there is no memory for formatting.
95 const char* what() const noexcept override;
96
97 /// \returns The \ref allocator_info passed to it in the constructor.
98 const allocator_info& allocator() const noexcept
99 {
100 return info_;
101 }
102
103 /// \returns The amount of memory that could not be allocated.
104 /// This is the value passed in the constructor.
105 std::size_t failed_allocation_size() const noexcept
106 {
107 return amount_;
108 }
109
110 private:
111 allocator_info info_;
112 std::size_t amount_;
113 };
114
115 /// A special case of \ref out_of_memory errors
116 /// thrown when a low-level allocator with a fixed size runs out of memory.
117 /// For example, thrown by \ref fixed_block_allocator or \ref static_allocator.<br>
118 /// It is derived from \ref out_of_memory but does not provide its own handler.
119 /// \ingroup core
120 class out_of_fixed_memory : public out_of_memory
121 {
122 public:
123 /// \effects Just forwards to \ref out_of_memory.
124 out_of_fixed_memory(const allocator_info& info, std::size_t amount)
125 : out_of_memory(info, amount)
126 {
127 }
128
129 /// \returns A static NTBS that describes the error.
130 /// It does not contain any specific information since there is no memory for formatting.
131 const char* what() const noexcept override;
132 };
133
134 /// The exception class thrown when an allocation size is bigger than the supported maximum.
135 /// This size is either the node, array or alignment parameter in a call to an allocation function.
136 /// If those exceed the supported maximum returned by \c max_node_size(), \c max_array_size() or \c max_alignment(),
137 /// one of its derived classes will be thrown or this class if in a situation where the type is unknown.
138 /// It is derived from \c std::bad_alloc.
139 /// Throwing can be prohibited by the handler function.
140 /// \note Even if all parameters are less than the maximum, \ref out_of_memory or a similar exception can be thrown,
141 /// because the maximum functions return an upper bound and not the actual supported maximum size,
142 /// since it always depends on fence memory, alignment buffer and the like.
143 /// \note A user should only \c catch \c bad_allocation_size, not the derived classes.
144 /// \note Most checks will only be done if \ref FOONATHAN_MEMORY_CHECK_ALLOCATION_SIZE is \c true.
145 /// \ingroup core
146 class bad_allocation_size : public std::bad_alloc
147 {
148 public:
149 /// The type of the handler called in the constructor of \ref bad_allocation_size.
150 /// When a bad allocation size is detected and the exception object created,
151 /// this handler gets called.
152 /// It is especially useful if exception support is disabled.
153 /// It gets the \ref allocator_info, the size passed to the function and the supported size
154 /// (the latter is still an upper bound).
155 /// \requiredbe It can log the error, throw a different exception derived from \c std::bad_alloc or abort the program.
156 /// If it returns, this exception object will be created and thrown.
157 /// \defaultbe On a hosted implementation it logs the error on \c stderr and continues execution,
158 /// leading to this exception being thrown.
159 /// On a freestanding implementation it does nothing.
160 using handler = void (*)(const allocator_info& info, std::size_t passed,
161 std::size_t supported);
162
163 /// \effects Sets \c h as the new \ref handler in an atomic operation.
164 /// A \c nullptr sets the default \ref handler.
165 /// \returns The previous \ref handler. This is never \c nullptr.
166 static handler set_handler(handler h);
167
168 /// \returns The current \ref handler. This is never \c nullptr.
169 static handler get_handler();
170
171 /// \effects Creates it by passing it the \ref allocator_info, the size passed to the allocation function
172 /// and an upper bound on the supported size.
173 /// It also calls the \ref handler to control whether or not it will be thrown.
174 bad_allocation_size(const allocator_info& info, std::size_t passed,
175 std::size_t supported);
176
177 /// \returns A static NTBS that describes the error.
178 /// It does not contain any specific information since there is no memory for formatting.
179 const char* what() const noexcept override;
180
181 /// \returns The \ref allocator_info passed to it in the constructor.
182 const allocator_info& allocator() const noexcept
183 {
184 return info_;
185 }
186
187 /// \returns The size or alignment value that was passed to the allocation function
188 /// which was too big. This is the same value passed to the constructor.
189 std::size_t passed_value() const noexcept
190 {
191 return passed_;
192 }
193
194 /// \returns An upper bound on the maximum supported size/alignment.
195 /// It is only an upper bound, values below can fail, but values above will always fail.
196 std::size_t supported_value() const noexcept
197 {
198 return supported_;
199 }
200
201 private:
202 allocator_info info_;
203 std::size_t passed_, supported_;
204 };
205
206 /// The exception class thrown when the node size exceeds the supported maximum,
207 /// i.e. it is bigger than \c max_node_size().
208 /// It is derived from \ref bad_allocation_size but does not override the handler.
209 /// \ingroup core
210 class bad_node_size : public bad_allocation_size
211 {
212 public:
213 /// \effects Just forwards to \ref bad_allocation_size.
214 bad_node_size(const allocator_info& info, std::size_t passed, std::size_t supported)
215 : bad_allocation_size(info, passed, supported)
216 {
217 }
218
219 /// \returns A static NTBS that describes the error.
220 /// It does not contain any specific information since there is no memory for formatting.
221 const char* what() const noexcept override;
222 };
223
224 /// The exception class thrown when the array size exceeds the supported maximum,
225 /// i.e. it is bigger than \c max_array_size().
226 /// It is derived from \ref bad_allocation_size but does not override the handler.
227 /// \ingroup core
228 class bad_array_size : public bad_allocation_size
229 {
230 public:
231 /// \effects Just forwards to \ref bad_allocation_size.
232 bad_array_size(const allocator_info& info, std::size_t passed, std::size_t supported)
233 : bad_allocation_size(info, passed, supported)
234 {
235 }
236
237 /// \returns A static NTBS that describes the error.
238 /// It does not contain any specific information since there is no memory for formatting.
239 const char* what() const noexcept override;
240 };
241
242 /// The exception class thrown when the alignment exceeds the supported maximum,
243 /// i.e. it is bigger than \c max_alignment().
244 /// It is derived from \ref bad_allocation_size but does not override the handler.
245 /// \ingroup core
246 class bad_alignment : public bad_allocation_size
247 {
248 public:
249 /// \effects Just forwards to \ref bad_allocation_size.
250 /// \c passed is the requested alignment, \c supported the maximum supported alignment.
251 bad_alignment(const allocator_info& info, std::size_t passed, std::size_t supported)
252 : bad_allocation_size(info, passed, supported)
253 {
254 }
255
256 /// \returns A static NTBS that describes the error.
257 /// It does not contain any specific information since there is no memory for formatting.
258 const char* what() const noexcept override;
259 };
260
261 namespace detail
262 {
263 template <class Ex, typename Func>
264 void check_allocation_size(std::size_t passed, Func f, const allocator_info& info)
265 {
266 #if FOONATHAN_MEMORY_CHECK_ALLOCATION_SIZE
267 auto supported = f();
268 if (passed > supported)
269 FOONATHAN_THROW(Ex(info, passed, supported));
270 #else
271 (void)passed;
272 (void)f;
273 (void)info;
274 #endif
275 }
276
277 template <class Ex>
278 void check_allocation_size(std::size_t passed, std::size_t supported,
279 const allocator_info& info)
280 {
281 check_allocation_size<Ex>(
282 passed, [&] { return supported; }, info);
283 }
284 } // namespace detail
285 } // namespace memory
286 } // namespace foonathan
287
288 #endif // FOONATHAN_MEMORY_ERROR_HPP_INCLUDED
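// ---- usage sketch (editorial illustration, not part of the upstream sources) ----
// Installing a custom out_of_memory handler: it runs before the exception is
// thrown (or, with exceptions disabled, before execution continues), so it is
// the right place for logging. The function name is made up.
#include <cstddef>
#include <cstdio>

#include <foonathan/memory/error.hpp>

void install_example_oom_handler()
{
    foonathan::memory::out_of_memory::set_handler(
        [](const foonathan::memory::allocator_info& info, std::size_t amount) {
            std::fprintf(stderr, "[%s] failed to allocate %zu bytes\n", info.name,
                         amount);
        });
}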
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_FALLBACK_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_FALLBACK_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class template \ref foonathan::memory::fallback_allocator.
9
10 #include "detail/ebo_storage.hpp"
11 #include "detail/utility.hpp"
12 #include "allocator_traits.hpp"
13 #include "config.hpp"
14
15 namespace foonathan
16 {
17 namespace memory
18 {
19 /// A \concept{raw_allocator,RawAllocator} with a fallback.
20 /// Allocation first tries `Default`, if it fails,
21 /// it uses `Fallback`.
22 /// \requires `Default` must be a composable \concept{concept_rawallocator,RawAllocator},
23 /// `Fallback` must be a \concept{concept_rawallocator,RawAllocator}.
24 /// \ingroup adapter
25 template <class Default, class Fallback>
26 class fallback_allocator
27 : FOONATHAN_EBO(detail::ebo_storage<0, typename allocator_traits<Default>::allocator_type>),
28 FOONATHAN_EBO(detail::ebo_storage<1, typename allocator_traits<Fallback>::allocator_type>)
29 {
30 using default_traits = allocator_traits<Default>;
31 using default_composable_traits = composable_allocator_traits<Default>;
32 using fallback_traits = allocator_traits<Fallback>;
33 using fallback_composable_traits = composable_allocator_traits<Fallback>;
34 using fallback_composable =
35 is_composable_allocator<typename fallback_traits::allocator_type>;
36
37 public:
38 using default_allocator_type = typename allocator_traits<Default>::allocator_type;
39 using fallback_allocator_type = typename allocator_traits<Fallback>::allocator_type;
40
41 using is_stateful =
42 std::integral_constant<bool, default_traits::is_stateful::value
43 || fallback_traits::is_stateful::value>;
44
45 /// \effects Default constructs both allocators.
46 /// \notes This function only participates in overload resolution, if both allocators are not stateful.
47 FOONATHAN_ENABLE_IF(!is_stateful::value)
48 fallback_allocator()
49 : detail::ebo_storage<0, default_allocator_type>({}),
50 detail::ebo_storage<1, fallback_allocator_type>({})
51 {
52 }
53
54 /// \effects Constructs the allocator by passing in the two allocators it has.
55 explicit fallback_allocator(default_allocator_type&& default_alloc,
56 fallback_allocator_type&& fallback_alloc = {})
57 : detail::ebo_storage<0, default_allocator_type>(detail::move(default_alloc)),
58 detail::ebo_storage<1, fallback_allocator_type>(detail::move(fallback_alloc))
59 {
60 }
61
62 /// @{
63 /// \effects First calls the composing (de)allocation function on the `default_allocator_type`.
64 /// If that fails, uses the non-composing function of the `fallback_allocator_type`.
65 void* allocate_node(std::size_t size, std::size_t alignment)
66 {
67 auto ptr = default_composable_traits::try_allocate_node(get_default_allocator(),
68 size, alignment);
69 if (!ptr)
70 ptr = fallback_traits::allocate_node(get_fallback_allocator(), size, alignment);
71 return ptr;
72 }
73
74 void* allocate_array(std::size_t count, std::size_t size, std::size_t alignment)
75 {
76 auto ptr = default_composable_traits::try_allocate_array(get_default_allocator(),
77 count, size, alignment);
78 if (!ptr)
79 ptr = fallback_traits::allocate_array(get_fallback_allocator(), count, size,
80 alignment);
81 return ptr;
82 }
83
84 void deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
85 {
86 auto res = default_composable_traits::try_deallocate_node(get_default_allocator(),
87 ptr, size, alignment);
88 if (!res)
89 fallback_traits::deallocate_node(get_fallback_allocator(), ptr, size,
90 alignment);
91 }
92
93 void deallocate_array(void* ptr, std::size_t count, std::size_t size,
94 std::size_t alignment) noexcept
95 {
96 auto res =
97 default_composable_traits::try_deallocate_array(get_default_allocator(), ptr,
98 count, size, alignment);
99 if (!res)
100 fallback_traits::deallocate_array(get_fallback_allocator(), ptr, count, size,
101 alignment);
102 }
103 /// @}
104
105 /// @{
106 /// \effects First calls the composing (de)allocation function on the `default_allocator_type`.
107 /// If that fails, uses the composing function of the `fallback_allocator_type`.
108 /// \requires The `fallback_allocator_type` must be composable.
109 FOONATHAN_ENABLE_IF(fallback_composable::value)
110 void* try_allocate_node(std::size_t size, std::size_t alignment) noexcept
111 {
112 auto ptr = default_composable_traits::try_allocate_node(get_default_allocator(),
113 size, alignment);
114 if (!ptr)
115 ptr = fallback_composable_traits::try_allocate_node(get_fallback_allocator(),
116 size, alignment);
117 return ptr;
118 }
119
120 FOONATHAN_ENABLE_IF(fallback_composable::value)
121 void* try_allocate_array(std::size_t count, std::size_t size,
122 std::size_t alignment) noexcept
123 {
124 auto ptr = default_composable_traits::try_allocate_array(get_default_allocator(),
125 count, size, alignment);
126 if (!ptr)
127 ptr = fallback_composable_traits::try_allocate_array(get_fallback_allocator(),
128 count, size, alignment);
129 return ptr;
130 }
131
132 FOONATHAN_ENABLE_IF(fallback_composable::value)
133 bool try_deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
134 {
135 auto res = default_composable_traits::try_deallocate_node(get_default_allocator(),
136 ptr, size, alignment);
137 if (!res)
138 res = fallback_composable_traits::try_deallocate_node(get_fallback_allocator(),
139 ptr, size, alignment);
140 return res;
141 }
142
143 FOONATHAN_ENABLE_IF(fallback_composable::value)
144 bool try_deallocate_array(void* ptr, std::size_t count, std::size_t size,
145 std::size_t alignment) noexcept
146 {
147 auto res =
148 default_composable_traits::try_deallocate_array(get_default_allocator(), ptr,
149 count, size, alignment);
150 if (!res)
151 res = fallback_composable_traits::try_deallocate_array(get_fallback_allocator(),
152 ptr, count, size,
153 alignment);
154 return res;
155 }
156 /// @}
157
158 /// @{
159 /// \returns The maximum of the two values from both allocators.
160 std::size_t max_node_size() const
161 {
162 auto def = default_traits::max_node_size(get_default_allocator());
163 auto fallback = fallback_traits::max_node_size(get_fallback_allocator());
164 return fallback > def ? fallback : def;
165 }
166
167 std::size_t max_array_size() const
168 {
169 auto def = default_traits::max_array_size(get_default_allocator());
170 auto fallback = fallback_traits::max_array_size(get_fallback_allocator());
171 return fallback > def ? fallback : def;
172 }
173
174 std::size_t max_alignment() const
175 {
176 auto def = default_traits::max_alignment(get_default_allocator());
177 auto fallback = fallback_traits::max_alignment(get_fallback_allocator());
178 return fallback > def ? fallback : def;
179 }
180 /// @}
181
182 /// @{
183 /// \returns A (`const`) reference to the default allocator.
184 default_allocator_type& get_default_allocator() noexcept
185 {
186 return detail::ebo_storage<0, default_allocator_type>::get();
187 }
188
189 const default_allocator_type& get_default_allocator() const noexcept
190 {
191 return detail::ebo_storage<0, default_allocator_type>::get();
192 }
193 /// @}
194
195 /// @{
196 /// \returns A (`const`) reference to the fallback allocator.
197 fallback_allocator_type& get_fallback_allocator() noexcept
198 {
199 return detail::ebo_storage<1, fallback_allocator_type>::get();
200 }
201
202 const fallback_allocator_type& get_fallback_allocator() const noexcept
203 {
204 return detail::ebo_storage<1, fallback_allocator_type>::get();
205 }
206 /// @}
207 };
208 } // namespace memory
209 } // namespace foonathan
210
211 #endif // FOONATHAN_MEMORY_FALLBACK_ALLOCATOR_HPP_INCLUDED
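// ---[ Editorial usage sketch, not part of the library ]----------------------
// A toy composable allocator whose try-functions always fail, so every
// request is served by the heap_allocator fallback. It assumes the default
// composable_allocator_traits simply forward to these member functions;
// `exhausted_allocator` and `fallback_demo` are hypothetical names.
#include <cstddef>
#include <new>

#include <foonathan/memory/fallback_allocator.hpp>
#include <foonathan/memory/heap_allocator.hpp>

namespace sketch
{
    namespace memory = foonathan::memory;

    struct exhausted_allocator
    {
        // regular RawAllocator interface (unused here, the fallback handles it)
        void* allocate_node(std::size_t, std::size_t)
        {
            throw std::bad_alloc();
        }
        void deallocate_node(void*, std::size_t, std::size_t) noexcept {}

        // composable interface: pretend to be permanently out of memory
        void* try_allocate_node(std::size_t, std::size_t) noexcept
        {
            return nullptr;
        }
        bool try_deallocate_node(void*, std::size_t, std::size_t) noexcept
        {
            return false;
        }
    };

    void fallback_demo()
    {
        memory::fallback_allocator<exhausted_allocator, memory::heap_allocator> alloc;
        // try_allocate_node() on the default fails, so the heap serves this:
        void* node = alloc.allocate_node(64, alignof(double));
        // try_deallocate_node() returns false, so the heap frees it again:
        alloc.deallocate_node(node, 64, alignof(double));
    }
} // namespace sketch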
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_HEAP_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_HEAP_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::heap_allocator and related functions.
9
10 #include "detail/lowlevel_allocator.hpp"
11 #include "config.hpp"
12
13 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
14 #include "allocator_traits.hpp"
15 #endif
16
17 namespace foonathan
18 {
19 namespace memory
20 {
21 struct allocator_info;
22
23 /// Allocates heap memory.
24 /// This function is used by the \ref heap_allocator to allocate the heap memory.
25 /// It is not defined on a freestanding implementation; a definition must be provided by the library user.
26 /// \requiredbe This function shall return a block of uninitialized memory that is aligned for \c max_align_t and has the given size.
27 /// The size parameter will not be zero.
28 /// It shall return a \c nullptr if no memory is available.
29 /// It must be thread safe.
30 /// \defaultbe On a hosted implementation this function uses OS specific facilities; \c std::malloc is used as a fallback.
31 /// \ingroup allocator
32 void* heap_alloc(std::size_t size) noexcept;
33
34 /// Deallocates heap memory.
35 /// This function is used by the \ref heap_allocator to deallocate the heap memory.
36 /// It is not defined on a freestanding implementation; a definition must be provided by the library user.
37 /// \requiredbe This function gets a pointer from a previous call to \ref heap_alloc with the same size.
38 /// It shall free the memory.
39 /// The pointer will not be null.
40 /// It must be thread safe.
41 /// \defaultbe On a hosted implementation this function uses OS specific facilities; \c std::free is used as a fallback.
42 /// \ingroup allocator
43 void heap_dealloc(void* ptr, std::size_t size) noexcept;
44
45 namespace detail
46 {
47 struct heap_allocator_impl
48 {
49 static allocator_info info() noexcept;
50
51 static void* allocate(std::size_t size, std::size_t) noexcept
52 {
53 return heap_alloc(size);
54 }
55
56 static void deallocate(void* ptr, std::size_t size, std::size_t) noexcept
57 {
58 heap_dealloc(ptr, size);
59 }
60
61 static std::size_t max_node_size() noexcept;
62 };
63
64 FOONATHAN_MEMORY_LL_ALLOCATOR_LEAK_CHECKER(heap_allocator_impl,
65 heap_allocator_leak_checker)
66 } // namespace detail
67
68 /// A stateless \concept{concept_rawallocator,RawAllocator} that allocates memory from the heap.
69 /// It uses the two functions \ref heap_alloc and \ref heap_dealloc for the allocation,
70 /// which default to \c std::malloc and \c std::free.
71 /// \ingroup allocator
72 using heap_allocator =
73 FOONATHAN_IMPL_DEFINED(detail::lowlevel_allocator<detail::heap_allocator_impl>);
74
75 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
76 extern template class detail::lowlevel_allocator<detail::heap_allocator_impl>;
77 extern template class allocator_traits<heap_allocator>;
78 #endif
79 } // namespace memory
80 } // namespace foonathan
81
82 #endif // FOONATHAN_MEMORY_HEAP_ALLOCATOR_HPP_INCLUDED
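// ---[ Editorial sketch, not part of the library ]-----------------------------
// On a freestanding implementation the user must define heap_alloc() and
// heap_dealloc() themselves; the pair below shows the required shape, using
// std::malloc/std::free as stand-ins for a platform allocator. Hosted builds
// already ship definitions, so compiling this there would violate the ODR --
// it is illustrative only.
#include <cstdlib>

#include <foonathan/memory/heap_allocator.hpp>

void* foonathan::memory::heap_alloc(std::size_t size) noexcept
{
    return std::malloc(size);
}

void foonathan::memory::heap_dealloc(void* ptr, std::size_t) noexcept
{
    std::free(ptr);
}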
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_ITERATION_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_ITERATION_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class template \ref foonathan::memory::iteration_allocator.
9
10 #include "detail/debug_helpers.hpp"
11 #include "detail/memory_stack.hpp"
12 #include "default_allocator.hpp"
13 #include "error.hpp"
14 #include "memory_arena.hpp"
15
16 namespace foonathan
17 {
18 namespace memory
19 {
20 namespace detail
21 {
22 template <class BlockOrRawAllocator>
23 using iteration_block_allocator =
24 make_block_allocator_t<BlockOrRawAllocator, fixed_block_allocator>;
25 } // namespace detail
26
27 /// A stateful \concept{concept_rawallocator,RawAllocator} that is designed for allocations in a loop.
28 /// It uses `N` stacks for the allocation, one of them is always active.
29 /// Allocation uses the currently active stack.
30 /// Calling \ref iteration_allocator::next_iteration() at the end of the loop,
31 /// will make the next stack active for allocation,
32 /// effectively releasing all of its memory.
33 /// Any memory allocated will thus be usable for `N` iterations of the loop.
34 /// This type of allocator is a generalization of the double frame allocator.
35 /// \ingroup allocator
36 template <std::size_t N, class BlockOrRawAllocator = default_allocator>
37 class iteration_allocator
38 : FOONATHAN_EBO(detail::iteration_block_allocator<BlockOrRawAllocator>)
39 {
40 public:
41 using allocator_type = detail::iteration_block_allocator<BlockOrRawAllocator>;
42
43 /// \effects Creates it with a given initial block size and other constructor arguments for the \concept{concept_blockallocator,BlockAllocator}.
44 /// It will allocate the first (and only) block and evenly divide it on all the stacks it uses.
45 template <typename... Args>
46 explicit iteration_allocator(std::size_t block_size, Args&&... args)
47 : allocator_type(block_size, detail::forward<Args>(args)...), cur_(0u)
48 {
49 block_ = get_allocator().allocate_block();
50 auto cur = static_cast<char*>(block_.memory);
51 auto size_each = block_.size / N;
52 for (auto i = 0u; i != N; ++i)
53 {
54 stacks_[i] = detail::fixed_memory_stack(cur);
55 cur += size_each;
56 }
57 }
58
59 iteration_allocator(iteration_allocator&& other) noexcept
60 : allocator_type(detail::move(other)),
61 block_(other.block_),
62 cur_(detail::move(other.cur_))
63 {
64 for (auto i = 0u; i != N; ++i)
65 stacks_[i] = detail::move(other.stacks_[i]);
66
67 other.cur_ = N;
68 }
69
70 ~iteration_allocator() noexcept
71 {
72 if (cur_ < N)
73 get_allocator().deallocate_block(block_);
74 }
75
76 iteration_allocator& operator=(iteration_allocator&& other) noexcept
77 {
78 allocator_type::operator=(detail::move(other));
79 block_ = other.block_;
80 cur_ = other.cur_;
81
82 for (auto i = 0u; i != N; ++i)
83 stacks_[i] = detail::move(other.stacks_[i]);
84
85 other.cur_ = N;
86
87 return *this;
88 }
89
90 /// \effects Allocates a memory block of given size and alignment.
91 /// It simply moves the top marker of the currently active stack.
92 /// \returns A \concept{concept_node,node} with given size and alignment.
93 /// \throws \ref out_of_fixed_memory if the current stack does not have any memory left.
94 /// \requires \c size and \c alignment must be valid.
95 void* allocate(std::size_t size, std::size_t alignment)
96 {
97 auto& stack = stacks_[cur_];
98
99 auto fence = detail::debug_fence_size;
100 auto offset = detail::align_offset(stack.top() + fence, alignment);
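// required layout: [debug fence][alignment padding][allocation][debug fence]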
101 if (!stack.top()
102 || (fence + offset + size + fence > std::size_t(block_end(cur_) - stack.top())))
103 FOONATHAN_THROW(out_of_fixed_memory(info(), size));
104 return stack.allocate_unchecked(size, offset);
105 }
106
107 /// \effects Allocates a memory block of given size and alignment
108 /// similar to \ref allocate().
109 /// \returns A \concept{concept_node,node} with given size and alignment
110 /// or `nullptr` if the current stack does not have any memory left.
111 void* try_allocate(std::size_t size, std::size_t alignment) noexcept
112 {
113 auto& stack = stacks_[cur_];
114 return stack.allocate(block_end(cur_), size, alignment);
115 }
116
117 /// \effects Goes to the next internal stack.
118 /// This will clear the stack whose \ref max_iterations() lifetime has been reached,
119 /// and use it for all allocations in this iteration.
120 /// \note This function should be called at the end of the loop.
121 void next_iteration() noexcept
122 {
123 FOONATHAN_MEMORY_ASSERT_MSG(cur_ != N, "moved-from allocator");
124 cur_ = (cur_ + 1) % N;
125 stacks_[cur_].unwind(block_start(cur_));
126 }
127
128 /// \returns The number of iterations each allocation will live.
129 /// This is the template parameter `N`.
130 static std::size_t max_iterations() noexcept
131 {
132 return N;
133 }
134
135 /// \returns The index of the current iteration.
136 /// This is modulo \ref max_iterations().
137 std::size_t cur_iteration() const noexcept
138 {
139 return cur_;
140 }
141
142 /// \returns A reference to the \concept{concept_blockallocator,BlockAllocator} used for managing the memory.
143 /// \requires It is undefined behavior to move this allocator out into another object.
144 allocator_type& get_allocator() noexcept
145 {
146 return *this;
147 }
148
149 /// \returns The amount of memory remaining in the stack with the given index.
150 /// This is the number of bytes that are available for allocation.
151 std::size_t capacity_left(std::size_t i) const noexcept
152 {
153 return std::size_t(block_end(i) - stacks_[i].top());
154 }
155
156 /// \returns The amount of memory remaining in the currently active stack.
157 std::size_t capacity_left() const noexcept
158 {
159 return capacity_left(cur_iteration());
160 }
161
162 private:
163 allocator_info info() const noexcept
164 {
165 return {FOONATHAN_MEMORY_LOG_PREFIX "::iteration_allocator", this};
166 }
167
168 char* block_start(std::size_t i) const noexcept
169 {
170 FOONATHAN_MEMORY_ASSERT_MSG(i <= N, "moved from state");
171 auto ptr = static_cast<char*>(block_.memory);
172 return ptr + (i * block_.size / N);
173 }
174
175 char* block_end(std::size_t i) const noexcept
176 {
177 FOONATHAN_MEMORY_ASSERT_MSG(i < N, "moved from state");
178 return block_start(i + 1);
179 }
180
181 detail::fixed_memory_stack stacks_[N];
182 memory_block block_;
183 std::size_t cur_;
184
185 friend allocator_traits<iteration_allocator<N, BlockOrRawAllocator>>;
186 friend composable_allocator_traits<iteration_allocator<N, BlockOrRawAllocator>>;
187 };
188
189 /// An alias for \ref iteration_allocator for two iterations.
190 /// \ingroup allocator
191 template <class BlockOrRawAllocator = default_allocator>
192 FOONATHAN_ALIAS_TEMPLATE(double_frame_allocator,
193 iteration_allocator<2, BlockOrRawAllocator>);
194
195 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
196 extern template class iteration_allocator<2>;
197 #endif
198
199 /// Specialization of the \ref allocator_traits for \ref iteration_allocator.
200 /// \note It is not allowed to mix calls through the specialization and through the member functions,
201 /// i.e. \ref iteration_allocator::allocate() and this \c allocate_node().
202 /// \ingroup allocator
203 template <std::size_t N, class BlockAllocator>
204 class allocator_traits<iteration_allocator<N, BlockAllocator>>
205 {
206 public:
207 using allocator_type = iteration_allocator<N, BlockAllocator>;
208 using is_stateful = std::true_type;
209
210 /// \returns The result of \ref iteration_allocator::allocate().
211 static void* allocate_node(allocator_type& state, std::size_t size,
212 std::size_t alignment)
213 {
214 return state.allocate(size, alignment);
215 }
216
217 /// \returns The result of \ref iteration_allocator::allocate().
218 static void* allocate_array(allocator_type& state, std::size_t count, std::size_t size,
219 std::size_t alignment)
220 {
221 return allocate_node(state, count * size, alignment);
222 }
223
224 /// @{
225 /// \effects Does nothing.
226 /// Actual deallocation can only be done via \ref iteration_allocator::next_iteration().
227 static void deallocate_node(allocator_type&, void*, std::size_t, std::size_t) noexcept
228 {
229 }
230
231 static void deallocate_array(allocator_type&, void*, std::size_t, std::size_t,
232 std::size_t) noexcept
233 {
234 }
235 /// @}
236
237 /// @{
238 /// \returns The maximum size which is \ref iteration_allocator::capacity_left().
239 static std::size_t max_node_size(const allocator_type& state) noexcept
240 {
241 return state.capacity_left();
242 }
243
244 static std::size_t max_array_size(const allocator_type& state) noexcept
245 {
246 return state.capacity_left();
247 }
248 /// @}
249
250 /// \returns The maximum possible value since there is no alignment restriction
251 /// (except indirectly through the size of the memory block).
252 static std::size_t max_alignment(const allocator_type&) noexcept
253 {
254 return std::size_t(-1);
255 }
256 };
257
258 /// Specialization of the \ref composable_allocator_traits for \ref iteration_allocator classes.
259 /// \ingroup allocator
260 template <std::size_t N, class BlockAllocator>
261 class composable_allocator_traits<iteration_allocator<N, BlockAllocator>>
262 {
263 public:
264 using allocator_type = iteration_allocator<N, BlockAllocator>;
265
266 /// \returns The result of \ref iteration_allocator::try_allocate().
267 static void* try_allocate_node(allocator_type& state, std::size_t size,
268 std::size_t alignment) noexcept
269 {
270 return state.try_allocate(size, alignment);
271 }
272
273 /// \returns The result of \ref iteration_allocator::try_allocate().
274 static void* try_allocate_array(allocator_type& state, std::size_t count,
275 std::size_t size, std::size_t alignment) noexcept
276 {
277 return state.try_allocate(count * size, alignment);
278 }
279
280 /// @{
281 /// \effects Does nothing.
282 /// \returns Whether the memory will eventually be reclaimed by \ref iteration_allocator::next_iteration().
283 static bool try_deallocate_node(allocator_type& state, void* ptr, std::size_t,
284 std::size_t) noexcept
285 {
286 return state.block_.contains(ptr);
287 }
288
289 static bool try_deallocate_array(allocator_type& state, void* ptr, std::size_t count,
290 std::size_t size, std::size_t alignment) noexcept
291 {
292 return try_deallocate_node(state, ptr, count * size, alignment);
293 }
294 /// @}
295 };
296
297 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
298 extern template class allocator_traits<iteration_allocator<2>>;
299 extern template class composable_allocator_traits<iteration_allocator<2>>;
300 #endif
301 } // namespace memory
302 } // namespace foonathan
303
304 #endif // FOONATHAN_MEMORY_ITERATION_ALLOCATOR_HPP_INCLUDED
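// ---[ Editorial usage sketch, not part of the library ]----------------------
// The intended loop pattern: every allocation stays valid for N (= 2 here)
// calls to next_iteration() before its stack is reused; double_frame_allocator
// is the N == 2 alias declared above. The sizes and the name `iteration_demo`
// are illustrative.
#include <foonathan/memory/iteration_allocator.hpp>

namespace sketch
{
    namespace memory = foonathan::memory;

    void iteration_demo()
    {
        // one 4 KiB block, divided evenly between the two internal stacks
        memory::iteration_allocator<2> alloc(4096u);

        for (int i = 0; i != 100; ++i)
        {
            // usable during this iteration and the next one
            void* scratch = alloc.allocate(64u, 8u);
            (void)scratch;

            alloc.next_iteration(); // rotate stacks at the end of the loop body
        }
    }
} // namespace sketch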
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_JOINT_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_JOINT_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class template \ref foonathan::memory::joint_ptr, \ref foonathan::memory::joint_allocator and related.
9
10 #include <initializer_list>
11 #include <new>
12
13 #include "detail/align.hpp"
14 #include "detail/memory_stack.hpp"
15 #include "detail/utility.hpp"
16 #include "allocator_storage.hpp"
17 #include "config.hpp"
18 #include "default_allocator.hpp"
19 #include "error.hpp"
20
21 namespace foonathan
22 {
23 namespace memory
24 {
25 template <typename T, class RawAllocator>
26 class joint_ptr;
27
28 template <typename T>
29 class joint_type;
30
31 namespace detail
32 {
33 // the stack that allocates the joint memory
34 class joint_stack
35 {
36 public:
37 joint_stack(void* mem, std::size_t cap) noexcept
38 : stack_(static_cast<char*>(mem)), end_(static_cast<char*>(mem) + cap)
39 {
40 }
41
42 void* allocate(std::size_t size, std::size_t alignment) noexcept
43 {
44 return stack_.allocate(end_, size, alignment, 0u);
45 }
46
47 bool bump(std::size_t offset) noexcept
48 {
49 if (offset > std::size_t(end_ - stack_.top()))
50 return false;
51 stack_.bump(offset);
52 return true;
53 }
54
55 char* top() noexcept
56 {
57 return stack_.top();
58 }
59
60 const char* top() const noexcept
61 {
62 return stack_.top();
63 }
64
65 void unwind(void* ptr) noexcept
66 {
67 stack_.unwind(static_cast<char*>(ptr));
68 }
69
70 std::size_t capacity(const char* mem) const noexcept
71 {
72 return std::size_t(end_ - mem);
73 }
74
75 std::size_t capacity_left() const noexcept
76 {
77 return std::size_t(end_ - top());
78 }
79
80 std::size_t capacity_used(const char* mem) const noexcept
81 {
82 return std::size_t(top() - mem);
83 }
84
85 private:
86 detail::fixed_memory_stack stack_;
87 char* end_;
88 };
89
90 template <typename T>
91 detail::joint_stack& get_stack(joint_type<T>& obj) noexcept;
92
93 template <typename T>
94 const detail::joint_stack& get_stack(const joint_type<T>& obj) noexcept;
95 } // namespace detail
96
97 /// Tag type that can't be created.
98 ///
99 /// It is used by \ref joint_ptr.
100 /// \ingroup allocator
101 class joint
102 {
103 joint(std::size_t cap) noexcept : capacity(cap) {}
104
105 std::size_t capacity;
106
107 template <typename T, class RawAllocator>
108 friend class joint_ptr;
109 template <typename T>
110 friend class joint_type;
111 };
112
113 /// Tag type to make the joint size more explicit.
114 ///
115 /// It is used by \ref joint_ptr.
116 /// \ingroup allocator
117 struct joint_size
118 {
119 std::size_t size;
120
121 explicit joint_size(std::size_t s) noexcept : size(s) {}
122 };
123
124 /// CRTP base class for all objects that want to use joint memory.
125 ///
126 /// This will disable default copy/move operations
127 /// and inserts additional members for the joint memory management.
128 /// \ingroup allocator
129 template <typename T>
130 class joint_type
131 {
132 protected:
133 /// \effects Creates the base class,
134 /// the tag type cannot be created by the user.
135 /// \note This ensures that you cannot create joint types yourself.
136 joint_type(joint j) noexcept;
137
138 joint_type(const joint_type&) = delete;
139 joint_type(joint_type&&) = delete;
140
141 private:
142 detail::joint_stack stack_;
143
144 template <typename U>
145 friend detail::joint_stack& detail::get_stack(joint_type<U>& obj) noexcept;
146 template <typename U>
147 friend const detail::joint_stack& detail::get_stack(const joint_type<U>& obj) noexcept;
148 };
149
150 namespace detail
151 {
152 template <typename T>
153 detail::joint_stack& get_stack(joint_type<T>& obj) noexcept
154 {
155 return obj.stack_;
156 }
157
158 template <typename T>
159 const detail::joint_stack& get_stack(const joint_type<T>& obj) noexcept
160 {
161 return obj.stack_;
162 }
163
164 template <typename T>
165 char* get_memory(joint_type<T>& obj) noexcept
166 {
167 auto mem = static_cast<void*>(&obj);
168 return static_cast<char*>(mem) + sizeof(T);
169 }
170
171 template <typename T>
172 const char* get_memory(const joint_type<T>& obj) noexcept
173 {
174 auto mem = static_cast<const void*>(&obj);
175 return static_cast<const char*>(mem) + sizeof(T);
176 }
177
178 } // namespace detail
179
180 template <typename T>
181 joint_type<T>::joint_type(joint j) noexcept : stack_(detail::get_memory(*this), j.capacity)
182 {
183 FOONATHAN_MEMORY_ASSERT(stack_.top() == detail::get_memory(*this));
184 FOONATHAN_MEMORY_ASSERT(stack_.capacity_left() == j.capacity);
185 }
186
187 /// A pointer to an object where all allocations are joint.
188 ///
189 /// It can either own an object or not (be `nullptr`).
190 /// When it owns an object, it points to a memory block.
191 /// This memory block contains both the actual object (of the type `T`)
192 /// and space for allocations of `T`s members.
193 ///
194 /// The type `T` must be derived from \ref joint_type and every constructor must take \ref joint
195 /// as first parameter.
196 /// This prevents you from creating joint objects yourself,
197 /// without the additional storage.
198 /// The default copy and move constructors are also deleted;
199 /// you need to write them yourself.
200 ///
201 /// You can only access the object through the pointer,
202 /// use \ref joint_allocator or \ref joint_array as members of `T`,
203 /// to enable the memory sharing.
204 /// If you are using \ref joint_allocator inside STL containers,
205 /// make sure that you do not call their regular copy/move constructors,
206 /// but instead the version where you pass an allocator.
207 ///
208 /// The memory block will be managed by the given \concept{concept_rawallocator,RawAllocator},
209 /// it is stored in an \ref allocator_reference and not owned by the pointer directly.
210 /// \ingroup allocator
211 template <typename T, class RawAllocator>
212 class joint_ptr : FOONATHAN_EBO(allocator_reference<RawAllocator>)
213 {
214 static_assert(std::is_base_of<joint_type<T>, T>::value,
215 "T must be derived of joint_type<T>");
216
217 public:
218 using element_type = T;
219 using allocator_type = typename allocator_reference<RawAllocator>::allocator_type;
220
221 //=== constructors/destructor/assignment ===//
222 /// @{
223 /// \effects Creates it with a \concept{concept_rawallocator,RawAllocator}, but does not own a new object.
224 explicit joint_ptr(allocator_type& alloc) noexcept
225 : allocator_reference<RawAllocator>(alloc), ptr_(nullptr)
226 {
227 }
228
229 explicit joint_ptr(const allocator_type& alloc) noexcept
230 : allocator_reference<RawAllocator>(alloc), ptr_(nullptr)
231 {
232 }
233 /// @}
234
235 /// @{
236 /// \effects Reserves memory for the object and the additional size,
237 /// and creates the object by forwarding the arguments to its constructor.
238 /// The \concept{concept_rawallocator,RawAllocator} will be used for the allocation.
239 template <typename... Args>
240 joint_ptr(allocator_type& alloc, joint_size additional_size, Args&&... args)
241 : joint_ptr(alloc)
242 {
243 create(additional_size.size, detail::forward<Args>(args)...);
244 }
245
246 template <typename... Args>
247 joint_ptr(const allocator_type& alloc, joint_size additional_size, Args&&... args)
248 : joint_ptr(alloc)
249 {
250 create(additional_size.size, detail::forward<Args>(args)...);
251 }
252 /// @}
253
254 /// \effects Move-constructs the pointer.
255 /// Ownership will be transferred from `other` to the new object.
256 joint_ptr(joint_ptr&& other) noexcept
257 : allocator_reference<RawAllocator>(detail::move(other)), ptr_(other.ptr_)
258 {
259 other.ptr_ = nullptr;
260 }
261
262 /// \effects Destroys the object and deallocates its storage.
263 ~joint_ptr() noexcept
264 {
265 reset();
266 }
267
268 /// \effects Move-assigns the pointer.
269 /// The previously owned object will be destroyed,
270 /// and ownership of `other` transferred.
271 joint_ptr& operator=(joint_ptr&& other) noexcept
272 {
273 joint_ptr tmp(detail::move(other));
274 swap(*this, tmp);
275 return *this;
276 }
277
278 /// \effects Same as `reset()`.
279 joint_ptr& operator=(std::nullptr_t) noexcept
280 {
281 reset();
282 return *this;
283 }
284
285 /// \effects Swaps two pointers and their ownership and allocator.
286 friend void swap(joint_ptr& a, joint_ptr& b) noexcept
287 {
288 detail::adl_swap(static_cast<allocator_reference<RawAllocator>&>(a),
289 static_cast<allocator_reference<RawAllocator>&>(b));
290 detail::adl_swap(a.ptr_, b.ptr_);
291 }
292
293 //=== modifiers ===//
294 /// \effects Destroys the object it refers to,
295 /// if there is any.
296 void reset() noexcept
297 {
298 if (ptr_)
299 {
300 (**this).~element_type();
301 this->deallocate_node(ptr_,
302 sizeof(element_type)
303 + detail::get_stack(*ptr_).capacity(
304 detail::get_memory(*ptr_)),
305 alignof(element_type));
306 ptr_ = nullptr;
307 }
308 }
309
310 //=== accessors ===//
311 /// \returns `true` if the pointer does own an object,
312 /// `false` otherwise.
313 explicit operator bool() const noexcept
314 {
315 return ptr_ != nullptr;
316 }
317
318 /// \returns A reference to the object it owns.
319 /// \requires The pointer must own an object,
320 /// i.e. `operator bool()` must return `true`.
321 element_type& operator*() const noexcept
322 {
323 FOONATHAN_MEMORY_ASSERT(ptr_);
324 return *get();
325 }
326
327 /// \returns A pointer to the object it owns.
328 /// \requires The pointer must own an object,
329 /// i.e. `operator bool()` must return `true`.
330 element_type* operator->() const noexcept
331 {
332 FOONATHAN_MEMORY_ASSERT(ptr_);
333 return get();
334 }
335
336 /// \returns A pointer to the object it owns
337 /// or `nullptr`, if it does not own any object.
338 element_type* get() const noexcept
339 {
340 return static_cast<element_type*>(ptr_);
341 }
342
343 /// \returns A reference to the allocator it will use for the deallocation.
344 auto get_allocator() const noexcept
345 -> decltype(std::declval<allocator_reference<allocator_type>>().get_allocator())
346 {
347 return this->allocator_reference<allocator_type>::get_allocator();
348 }
349
350 private:
351 template <typename... Args>
352 void create(std::size_t additional_size, Args&&... args)
353 {
354 auto mem = this->allocate_node(sizeof(element_type) + additional_size,
355 alignof(element_type));
356
357 element_type* ptr = nullptr;
358 #if FOONATHAN_HAS_EXCEPTION_SUPPORT
359 try
360 {
361 ptr = ::new (mem)
362 element_type(joint(additional_size), detail::forward<Args>(args)...);
363 }
364 catch (...)
365 {
366 this->deallocate_node(mem, sizeof(element_type) + additional_size,
367 alignof(element_type));
368 throw;
369 }
370 #else
371 ptr = ::new (mem)
372 element_type(joint(additional_size), detail::forward<Args>(args)...);
373 #endif
374 ptr_ = ptr;
375 }
376
377 joint_type<T>* ptr_;
378
379 friend class joint_allocator;
380 };
381
382 /// @{
383 /// \returns `!ptr`,
384 /// i.e. if `ptr` does not own anything.
385 /// \relates joint_ptr
386 template <typename T, class RawAllocator>
387 bool operator==(const joint_ptr<T, RawAllocator>& ptr, std::nullptr_t)
388 {
389 return !ptr;
390 }
391
392 template <typename T, class RawAllocator>
393 bool operator==(std::nullptr_t, const joint_ptr<T, RawAllocator>& ptr)
394 {
395 return ptr == nullptr;
396 }
397 /// @}
398
399 /// @{
400 /// \returns `ptr.get() == p`,
401 /// i.e. if `ptr` owns the object referred to by `p`.
402 /// \relates joint_ptr
403 template <typename T, class RawAllocator>
404 bool operator==(const joint_ptr<T, RawAllocator>& ptr, T* p)
405 {
406 return ptr.get() == p;
407 }
408
409 template <typename T, class RawAllocator>
410 bool operator==(T* p, const joint_ptr<T, RawAllocator>& ptr)
411 {
412 return ptr == p;
413 }
414 /// @}
415
416 /// @{
417 /// \returns `!(ptr == nullptr)`,
418 /// i.e. if `ptr` does own something.
419 /// \relates joint_ptr
420 template <typename T, class RawAllocator>
421 bool operator!=(const joint_ptr<T, RawAllocator>& ptr, std::nullptr_t)
422 {
423 return !(ptr == nullptr);
424 }
425
426 template <typename T, class RawAllocator>
427 bool operator!=(std::nullptr_t, const joint_ptr<T, RawAllocator>& ptr)
428 {
429 return ptr != nullptr;
430 }
431 /// @}
432
433 /// @{
434 /// \returns `!(ptr == p)`,
435 /// i.e. if `ptr` does not own the object referred to by `p`.
436 /// \relates joint_ptr
437 template <typename T, class RawAllocator>
438 bool operator!=(const joint_ptr<T, RawAllocator>& ptr, T* p)
439 {
440 return !(ptr == p);
441 }
442
443 template <typename T, class RawAllocator>
444 bool operator!=(T* p, const joint_ptr<T, RawAllocator>& ptr)
445 {
446 return ptr != p;
447 }
448 /// @}
449
450 /// @{
451 /// \returns A new \ref joint_ptr as if created with the same arguments passed to the constructor.
452 /// \relatesalso joint_ptr
453 /// \ingroup allocator
454 template <typename T, class RawAllocator, typename... Args>
455 auto allocate_joint(RawAllocator& alloc, joint_size additional_size, Args&&... args)
456 -> joint_ptr<T, RawAllocator>
457 {
458 return joint_ptr<T, RawAllocator>(alloc, additional_size,
459 detail::forward<Args>(args)...);
460 }
461
462 template <typename T, class RawAllocator, typename... Args>
463 auto allocate_joint(const RawAllocator& alloc, joint_size additional_size, Args&&... args)
464 -> joint_ptr<T, RawAllocator>
465 {
466 return joint_ptr<T, RawAllocator>(alloc, additional_size,
467 detail::forward<Args>(args)...);
468 }
469 /// @}
470
471 /// @{
472 /// \returns A new \ref joint_ptr that points to a copy of `joint`.
473 /// It will allocate as much memory as needed and forward to the copy constructor.
474 /// \ingroup allocator
475 template <class RawAllocator, typename T>
476 auto clone_joint(RawAllocator& alloc, const joint_type<T>& joint)
477 -> joint_ptr<T, RawAllocator>
478 {
479 return joint_ptr<T, RawAllocator>(alloc,
480 joint_size(detail::get_stack(joint).capacity_used(
481 detail::get_memory(joint))),
482 static_cast<const T&>(joint));
483 }
484
485 template <class RawAllocator, typename T>
486 auto clone_joint(const RawAllocator& alloc, const joint_type<T>& joint)
487 -> joint_ptr<T, RawAllocator>
488 {
489 return joint_ptr<T, RawAllocator>(alloc,
490 joint_size(detail::get_stack(joint).capacity_used(
491 detail::get_memory(joint))),
492 static_cast<const T&>(joint));
493 }
494 /// @}
495
496 /// A \concept{concept_rawallocator,RawAllocator} that uses the additional joint memory for its allocation.
497 ///
498 /// It is somewhat limited and only allows allocation once.
499 /// All joint allocators for an object share the joint memory and must not be used in multiple threads.
500 /// The memory it returns is owned by a \ref joint_ptr and will be destroyed through it.
501 /// \ingroup allocator
502 class joint_allocator
503 {
504 public:
505 #if defined(__GNUC__) && (!defined(_GLIBCXX_USE_CXX11_ABI) || _GLIBCXX_USE_CXX11_ABI == 0)
506 // std::string requires default constructor for the small string optimization when using gcc's old ABI
507 // so add one, but it must never be used for allocation
508 joint_allocator() noexcept : stack_(nullptr) {}
509 #endif
510
511 /// \effects Creates it using the joint memory of the given object.
512 template <typename T>
513 joint_allocator(joint_type<T>& j) noexcept : stack_(&detail::get_stack(j))
514 {
515 }
516
517 joint_allocator(const joint_allocator& other) noexcept = default;
518 joint_allocator& operator=(const joint_allocator& other) noexcept = default;
519
520 /// \effects Allocates a node with given properties.
521 /// \returns A pointer to the new node.
522 /// \throws \ref out_of_fixed_memory exception if this function has been called for a second time
523 /// or the joint memory block is exhausted.
524 void* allocate_node(std::size_t size, std::size_t alignment)
525 {
526 FOONATHAN_MEMORY_ASSERT(stack_);
527 auto mem = stack_->allocate(size, alignment);
528 if (!mem)
529 FOONATHAN_THROW(out_of_fixed_memory(info(), size));
530 return mem;
531 }
532
533 /// \effects Deallocates the node, if possible.
534 /// \note It is only possible if it was the last allocation.
535 void deallocate_node(void* ptr, std::size_t size, std::size_t) noexcept
536 {
537 FOONATHAN_MEMORY_ASSERT(stack_);
538 auto end = static_cast<char*>(ptr) + size;
539 if (end == stack_->top())
540 stack_->unwind(ptr);
541 }
542
543 private:
544 allocator_info info() const noexcept
545 {
546 return allocator_info(FOONATHAN_MEMORY_LOG_PREFIX "::joint_allocator", this);
547 }
548
549 detail::joint_stack* stack_;
550
551 friend bool operator==(const joint_allocator& lhs, const joint_allocator& rhs) noexcept;
552 };
553
554 /// @{
555 /// \returns Whether `lhs` and `rhs` use the same joint memory for the allocation.
556 /// \relates joint_allocator
557 inline bool operator==(const joint_allocator& lhs, const joint_allocator& rhs) noexcept
558 {
559 return lhs.stack_ == rhs.stack_;
560 }
561
562 inline bool operator!=(const joint_allocator& lhs, const joint_allocator& rhs) noexcept
563 {
564 return !(lhs == rhs);
565 }
566 /// @}
567
568 /// Specialization of \ref is_shared_allocator to mark \ref joint_allocator as shared.
569 /// This allows using it as \ref allocator_reference directly.
570 /// \ingroup allocator
571 template <>
572 struct is_shared_allocator<joint_allocator> : std::true_type
573 {
574 };
575
576 /// Specialization of \ref is_thread_safe_allocator to mark \ref joint_allocator as thread safe.
577 /// This is an optimization to get rid of the mutex in \ref allocator_storage,
578 /// as a joint allocator must not be shared between threads.
579 /// \note The allocator is *not* thread safe; it just must not be shared.
580 template <>
581 struct is_thread_safe_allocator<joint_allocator> : std::true_type
582 {
583 };
584
585 #if !defined(DOXYGEN)
586 template <class RawAllocator>
587 struct propagation_traits;
588 #endif
589
590 /// Specialization of the \ref propagation_traits for the \ref joint_allocator.
591 /// A joint allocator does not propagate on assignment
592 /// and it is not allowed to use the regular copy/move constructor of allocator aware containers,
593 /// instead it needs the copy/move constructor with allocator.
594 /// \note This is required because the container constructor will end up copying/moving the allocator.
595 /// But this is not allowed as you need the allocator with the correct joined memory.
596 /// Copying can be customized (i.e. forbidden), but sadly not moving, so keep that in mind.
597 /// \ingroup allocator
598 template <>
599 struct propagation_traits<joint_allocator>
600 {
601 using propagate_on_container_swap = std::false_type;
602 using propagate_on_container_move_assignment = std::false_type;
603 using propagate_on_container_copy_assignment = std::false_type;
604
605 template <class AllocReference>
606 static AllocReference select_on_container_copy_construction(const AllocReference&)
607 {
608 static_assert(always_false<AllocReference>::value,
609 "you must not use the regular copy constructor");
610 }
611
612 private:
613 template <typename T>
614 struct always_false : std::false_type
615 {
616 };
617 };
618
619 /// A zero overhead dynamic array using joint memory.
620 ///
621 /// Using, e.g., `std::vector` with \ref joint_allocator
622 /// incurs a slight additional overhead.
623 /// This type is joint memory aware and has no overhead.
624 ///
625 /// It has a dynamic, but fixed size,
626 /// it cannot grow after it has been created.
627 /// \ingroup allocator
628 template <typename T>
629 class joint_array
630 {
631 public:
632 using value_type = T;
633 using iterator = value_type*;
634 using const_iterator = const value_type*;
635
636 //=== constructors ===//
637 /// \effects Creates with `size` default-constructed objects using the specified joint memory.
638 /// \throws \ref out_of_fixed_memory if `size` is too big
639 /// and anything thrown by `T`s constructor.
640 /// If an exception is thrown, the memory will be released directly.
641 template <typename JointType>
642 joint_array(std::size_t size, joint_type<JointType>& j)
643 : joint_array(detail::get_stack(j), size)
644 {
645 }
646
647 /// \effects Creates with `size` copies of `val` using the specified joint memory.
648 /// \throws \ref out_of_fixed_memory if `size` is too big
649 /// and anything thrown by `T`s constructor.
650 /// If an exception is thrown, the memory will be released directly.
651 template <typename JointType>
652 joint_array(std::size_t size, const value_type& val, joint_type<JointType>& j)
653 : joint_array(detail::get_stack(j), size, val)
654 {
655 }
656
657 /// \effects Creates with the copies of the objects in the initializer list using the specified joint memory.
658 /// \throws \ref out_of_fixed_memory if the size is too big
659 /// and anything thrown by `T`s constructor.
660 /// If an exception is thrown, the memory will be released directly.
661 template <typename JointType>
662 joint_array(std::initializer_list<value_type> ilist, joint_type<JointType>& j)
663 : joint_array(detail::get_stack(j), ilist)
664 {
665 }
666
667 /// \effects Creates it by forwarding each element of the range to `T`s constructor using the specified joint memory.
668 /// \throws \ref out_of_fixed_memory if the size is too big
669 /// and anything thrown by `T`s constructor.
670 /// If an exception is thrown, the memory will be released directly.
671 template <typename InIter, typename JointType,
672 typename = decltype(*std::declval<InIter&>()++)>
673 joint_array(InIter begin, InIter end, joint_type<JointType>& j)
674 : joint_array(detail::get_stack(j), begin, end)
675 {
676 }
677
678 joint_array(const joint_array&) = delete;
679
680 /// \effects Copy constructs each element from `other` into the storage of the specified joint memory.
681 /// \throws \ref out_of_fixed_memory if the size is too big
682 /// and anything thrown by `T`s constructor.
683 /// If an exception is thrown, the memory will be released directly.
684 template <typename JointType>
685 joint_array(const joint_array& other, joint_type<JointType>& j)
686 : joint_array(detail::get_stack(j), other)
687 {
688 }
689
690 joint_array(joint_array&&) = delete;
691
692 /// \effects Move constructs each element from `other` into the storage of the specified joint memory.
693 /// \throws \ref out_of_fixed_memory if the size is too big
694 /// and anything thrown by `T`s constructor.
695 /// If an exception is thrown, the memory will be released directly.
696 template <typename JointType>
697 joint_array(joint_array&& other, joint_type<JointType>& j)
698 : joint_array(detail::get_stack(j), detail::move(other))
699 {
700 }
701
702 /// \effects Destroys all objects,
703 /// but does not release the storage.
704 ~joint_array() noexcept
705 {
706 for (std::size_t i = 0u; i != size_; ++i)
707 ptr_[i].~T();
708 }
709
710 joint_array& operator=(const joint_array&) = delete;
711 joint_array& operator=(joint_array&&) = delete;
712
713 //=== accessors ===//
714 /// @{
715 /// \returns A reference to the `i`th object.
716 /// \requires `i < size()`.
717 value_type& operator[](std::size_t i) noexcept
718 {
719 FOONATHAN_MEMORY_ASSERT(i < size_);
720 return ptr_[i];
721 }
722
723 const value_type& operator[](std::size_t i) const noexcept
724 {
725 FOONATHAN_MEMORY_ASSERT(i < size_);
726 return ptr_[i];
727 }
728 /// @}
729
730 /// @{
731 /// \returns A pointer to the first object.
732 /// It points to contiguous memory and can be used to access the objects directly.
733 value_type* data() noexcept
734 {
735 return ptr_;
736 }
737
738 const value_type* data() const noexcept
739 {
740 return ptr_;
741 }
742 /// @}
743
744 /// @{
745 /// \returns A random access iterator to the first element.
746 iterator begin() noexcept
747 {
748 return ptr_;
749 }
750
751 const_iterator begin() const noexcept
752 {
753 return ptr_;
754 }
755 /// @}
756
757 /// @{
758 /// \returns A random access iterator one past the last element.
759 iterator end() noexcept
760 {
761 return ptr_ + size_;
762 }
763
764 const_iterator end() const noexcept
765 {
766 return ptr_ + size_;
767 }
768 /// @}
769
770 /// \returns The number of elements in the array.
771 std::size_t size() const noexcept
772 {
773 return size_;
774 }
775
776 /// \returns `true` if the array is empty, `false` otherwise.
777 bool empty() const noexcept
778 {
779 return size_ == 0u;
780 }
781
782 private:
783 // allocate only
784 struct allocate_only
785 {
786 };
787 joint_array(allocate_only, detail::joint_stack& stack, std::size_t size)
788 : ptr_(nullptr), size_(0u)
789 {
790 ptr_ = static_cast<T*>(stack.allocate(size * sizeof(T), alignof(T)));
791 if (!ptr_)
792 FOONATHAN_THROW(out_of_fixed_memory(info(), size * sizeof(T)));
793 }
794
795 class builder
796 {
797 public:
798 builder(detail::joint_stack& stack, T* ptr) noexcept
799 : stack_(&stack), objects_(ptr), size_(0u)
800 {
801 }
802
803 ~builder() noexcept
804 {
805 for (std::size_t i = 0u; i != size_; ++i)
806 objects_[i].~T();
807
808 if (size_)
809 stack_->unwind(objects_);
810 }
811
812 builder(builder&&) = delete;
813 builder& operator=(builder&&) = delete;
814
815 template <typename... Args>
816 T* create(Args&&... args)
817 {
818 auto ptr = ::new (static_cast<void*>(&objects_[size_]))
819 T(detail::forward<Args>(args)...);
820 ++size_;
821 return ptr;
822 }
823
824 std::size_t size() const noexcept
825 {
826 return size_;
827 }
828
829 std::size_t release() noexcept
830 {
831 auto res = size_;
832 size_ = 0u;
833 return res;
834 }
835
836 private:
837 detail::joint_stack* stack_;
838 T* objects_;
839 std::size_t size_;
840 };
841
842 joint_array(detail::joint_stack& stack, std::size_t size)
843 : joint_array(allocate_only{}, stack, size)
844 {
845 builder b(stack, ptr_);
846 for (auto i = 0u; i != size; ++i)
847 b.create();
848 size_ = b.release();
849 }
850
851 joint_array(detail::joint_stack& stack, std::size_t size, const value_type& value)
852 : joint_array(allocate_only{}, stack, size)
853 {
854 builder b(stack, ptr_);
855 for (auto i = 0u; i != size; ++i)
856 b.create(value);
857 size_ = b.release();
858 }
859
860 joint_array(detail::joint_stack& stack, std::initializer_list<value_type> ilist)
861 : joint_array(allocate_only{}, stack, ilist.size())
862 {
863 builder b(stack, ptr_);
864 for (auto& elem : ilist)
865 b.create(elem);
866 size_ = b.release();
867 }
868
869 joint_array(detail::joint_stack& stack, const joint_array& other)
870 : joint_array(allocate_only{}, stack, other.size())
871 {
872 builder b(stack, ptr_);
873 for (auto& elem : other)
874 b.create(elem);
875 size_ = b.release();
876 }
877
878 joint_array(detail::joint_stack& stack, joint_array&& other)
879 : joint_array(allocate_only{}, stack, other.size())
880 {
881 builder b(stack, ptr_);
882 for (auto& elem : other)
883 b.create(detail::move(elem));
884 size_ = b.release();
885 }
886
887 template <typename InIter>
888 joint_array(detail::joint_stack& stack, InIter begin, InIter end)
889 : ptr_(nullptr), size_(0u)
890 {
891 if (begin == end)
892 return;
893
894 ptr_ = static_cast<T*>(stack.allocate(sizeof(T), alignof(T)));
895 if (!ptr_)
896 FOONATHAN_THROW(out_of_fixed_memory(info(), sizeof(T)));
897
898 builder b(stack, ptr_);
899 b.create(*begin++);
900
901 for (auto last = ptr_; begin != end; ++begin)
902 {
903 // just bump stack to get more memory
904 if (!stack.bump(sizeof(T)))
905 FOONATHAN_THROW(out_of_fixed_memory(info(), b.size() * sizeof(T)));
906
907 auto cur = b.create(*begin);
908 FOONATHAN_MEMORY_ASSERT(last + 1 == cur);
909 last = cur;
910 }
911
912 size_ = b.release();
913 }
914
915 allocator_info info() const noexcept
916 {
917 return {FOONATHAN_MEMORY_LOG_PREFIX "::joint_array", this};
918 }
919
920 value_type* ptr_;
921 std::size_t size_;
922 };
923 } // namespace memory
924 } // namespace foonathan
925
926 #endif // FOONATHAN_MEMORY_JOINT_ALLOCATOR_HPP_INCLUDED
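// ---[ Editorial usage sketch, not part of the library ]----------------------
// A minimal joint type: the object and its float array live in one allocation
// owned by a joint_ptr. The names `point_cloud` and `joint_demo` are
// hypothetical; the constructors follow the rules documented above (every
// constructor takes memory::joint as its first parameter, and clone_joint()
// needs the (joint, const T&) copy constructor).
#include <cstddef>

#include <foonathan/memory/heap_allocator.hpp>
#include <foonathan/memory/joint_allocator.hpp>

namespace sketch
{
    namespace memory = foonathan::memory;

    struct point_cloud : memory::joint_type<point_cloud>
    {
        memory::joint_array<float> values;

        point_cloud(memory::joint j, std::size_t n)
        : memory::joint_type<point_cloud>(j), values(n, *this)
        {
        }

        // copy constructor with tag, used by clone_joint()
        point_cloud(memory::joint j, const point_cloud& other)
        : memory::joint_type<point_cloud>(j), values(other.values, *this)
        {
        }
    };

    void joint_demo()
    {
        memory::heap_allocator alloc;
        // one block: the point_cloud object plus space for 16 floats
        auto ptr = memory::allocate_joint<point_cloud>(alloc,
                                                       memory::joint_size(16 * sizeof(float)),
                                                       std::size_t(16));
        ptr->values[0] = 1.0f;

        auto copy = memory::clone_joint(alloc, *ptr); // deep copy, same layout
        (void)copy;
    }
} // namespace sketch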
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_MALLOC_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_MALLOC_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::malloc_allocator.
9 /// \note Only available on a hosted implementation.
10
11 #include "config.hpp"
12 #if !FOONATHAN_HOSTED_IMPLEMENTATION
13 #error "This header is only available for a hosted implementation."
14 #endif
15
16 #include <cstdlib>
17 #include <memory>
18
19 #include "detail/lowlevel_allocator.hpp"
20
21 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
22 #include "allocator_traits.hpp"
23 #endif
24
25 namespace foonathan
26 {
27 namespace memory
28 {
29 struct allocator_info;
30
31 namespace detail
32 {
33 struct malloc_allocator_impl
34 {
35 static allocator_info info() noexcept;
36
37 static void* allocate(std::size_t size, std::size_t) noexcept
38 {
39 return std::malloc(size);
40 }
41
42 static void deallocate(void* ptr, std::size_t, std::size_t) noexcept
43 {
44 std::free(ptr);
45 }
46
47 static std::size_t max_node_size() noexcept
48 {
49 return std::allocator_traits<std::allocator<char>>::max_size({});
50 }
51 };
52
53 FOONATHAN_MEMORY_LL_ALLOCATOR_LEAK_CHECKER(malloc_allocator_impl,
54 malloc_allocator_leak_checker)
55 } // namespace detail
56
57 /// A stateless \concept{concept_rawallocator,RawAllocator} that allocates memory using <tt>std::malloc()</tt>.
58 /// It throws \ref out_of_memory when the allocation fails.
59 /// \ingroup allocator
60 using malloc_allocator =
61 FOONATHAN_IMPL_DEFINED(detail::lowlevel_allocator<detail::malloc_allocator_impl>);
62
63 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
64 extern template class detail::lowlevel_allocator<detail::malloc_allocator_impl>;
65 extern template class allocator_traits<malloc_allocator>;
66 #endif
67 } // namespace memory
68 } // namespace foonathan
69
70 #endif //FOONATHAN_MEMORY_MALLOC_ALLOCATOR_HPP_INCLUDED
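// ---[ Editorial usage sketch, not part of the library ]----------------------
// malloc_allocator is stateless: allocate_node() forwards to std::malloc()
// and throws out_of_memory on failure. `malloc_demo` is a hypothetical name.
#include <cstddef>

#include <foonathan/memory/malloc_allocator.hpp>

namespace sketch
{
    void malloc_demo()
    {
        foonathan::memory::malloc_allocator alloc;
        void* node = alloc.allocate_node(32u, alignof(std::max_align_t));
        alloc.deallocate_node(node, 32u, alignof(std::max_align_t));
    }
} // namespace sketch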
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_MEMORY_ARENA_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_MEMORY_ARENA_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::memory_arena and related functionality regarding \concept{concept_blockallocator,BlockAllocators}.
9
10 #include <type_traits>
11
12 #include "detail/debug_helpers.hpp"
13 #include "detail/assert.hpp"
14 #include "detail/utility.hpp"
15 #include "allocator_traits.hpp"
16 #include "config.hpp"
17 #include "default_allocator.hpp"
18 #include "error.hpp"
19
20 namespace foonathan
21 {
22 namespace memory
23 {
24 /// A memory block.
25 /// It is defined by its starting address and size.
26 /// \ingroup core
27 struct memory_block
28 {
29 void* memory; ///< The address of the memory block (might be \c nullptr).
30 std::size_t size; ///< The size of the memory block (might be \c 0).
31
32 /// \effects Creates an invalid memory block with starting address \c nullptr and size \c 0.
33 memory_block() noexcept : memory_block(nullptr, std::size_t(0)) {}
34
35 /// \effects Creates a memory block from a given starting address and size.
36 memory_block(void* mem, std::size_t s) noexcept : memory(mem), size(s) {}
37
38 /// \effects Creates a memory block from a [begin,end) range.
39 memory_block(void* begin, void* end) noexcept
40 : memory_block(begin, static_cast<std::size_t>(static_cast<char*>(end)
41 - static_cast<char*>(begin)))
42 {
43 }
44
45 /// \returns Whether or not a pointer is inside the memory.
46 bool contains(const void* address) const noexcept
47 {
48 auto mem = static_cast<const char*>(memory);
49 auto addr = static_cast<const char*>(address);
50 return addr >= mem && addr < mem + size;
51 }
52 };
53
54 namespace detail
55 {
56 template <class BlockAllocator>
57 std::true_type is_block_allocator_impl(
58 int,
59 FOONATHAN_SFINAE(std::declval<memory_block&>() =
60 std::declval<BlockAllocator&>().allocate_block()),
61 FOONATHAN_SFINAE(std::declval<std::size_t&>() =
62 std::declval<BlockAllocator&>().next_block_size()),
63 FOONATHAN_SFINAE(std::declval<BlockAllocator>().deallocate_block(memory_block{})));
64
65 template <typename T>
66 std::false_type is_block_allocator_impl(short);
67 } // namespace detail
68
69 /// Traits that check whether a type models concept \concept{concept_blockallocator,BlockAllocator}.
70 /// \ingroup core
71 template <typename T>
72 struct is_block_allocator : decltype(detail::is_block_allocator_impl<T>(0))
73 {
74 };
75
76 #if !defined(DOXYGEN)
77 template <class BlockAllocator, bool Cached = true>
78 class memory_arena;
79 #endif
80
81 /// @{
82 /// Controls the caching of \ref memory_arena.
83 /// By default, deallocated blocks are put onto a cache, so they can be reused later;
84 /// these tag values enable/disable it.<br>
85 /// This can be useful, e.g. if there will never be blocks available for deallocation.
86 /// The (tiny) overhead for the cache can then be disabled.
87 /// An example is \ref memory_pool.
88 /// \ingroup core
89 constexpr bool cached_arena = true;
90 constexpr bool uncached_arena = false;
91 /// @}
92
93 namespace detail
94 {
95 // stores memory block in an intrusive linked list and allows LIFO access
96 class memory_block_stack
97 {
98 public:
99 memory_block_stack() noexcept : head_(nullptr) {}
100
101 ~memory_block_stack() noexcept {}
102
103 memory_block_stack(memory_block_stack&& other) noexcept : head_(other.head_)
104 {
105 other.head_ = nullptr;
106 }
107
108 memory_block_stack& operator=(memory_block_stack&& other) noexcept
109 {
110 memory_block_stack tmp(detail::move(other));
111 swap(*this, tmp);
112 return *this;
113 }
114
115 friend void swap(memory_block_stack& a, memory_block_stack& b) noexcept
116 {
117 detail::adl_swap(a.head_, b.head_);
118 }
119
120 // the raw allocated block returned from an allocator
121 using allocated_mb = memory_block;
122
123 // the inserted block is slightly smaller to allow for the fixup value
124 using inserted_mb = memory_block;
125
126 // how much an inserted block is smaller
127 static constexpr std::size_t implementation_offset() noexcept
128 {
129 // node size rounded up to the next multiple of max_alignment.
130 return (sizeof(node) / max_alignment + (sizeof(node) % max_alignment != 0))
131 * max_alignment;
132 }
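// e.g. if sizeof(node) were 24 and max_alignment 16, this would
// yield (1 + 1) * 16 = 32 (illustrative numbers only)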
133
134 // pushes a memory block
135 void push(allocated_mb block) noexcept;
136
137 // pops a memory block and returns the original block
138 allocated_mb pop() noexcept;
139
140 // steals the top block from another stack
141 void steal_top(memory_block_stack& other) noexcept;
142
143 // returns the last pushed() inserted memory block
144 inserted_mb top() const noexcept
145 {
146 FOONATHAN_MEMORY_ASSERT(head_);
147 auto mem = static_cast<void*>(head_);
148 return {static_cast<char*>(mem) + implementation_offset(), head_->usable_size};
149 }
150
151 bool empty() const noexcept
152 {
153 return head_ == nullptr;
154 }
155
156 bool owns(const void* ptr) const noexcept;
157
158 // O(n) size
159 std::size_t size() const noexcept;
160
161 private:
162 struct node
163 {
164 node* prev;
165 std::size_t usable_size;
166
167 node(node* p, std::size_t size) noexcept : prev(p), usable_size(size) {}
168 };
169
170 node* head_;
171 };
172
173 template <bool Cached>
174 class memory_arena_cache;
175
176 template <>
177 class memory_arena_cache<cached_arena>
178 {
179 protected:
180 bool cache_empty() const noexcept
181 {
182 return cached_.empty();
183 }
184
185 std::size_t cache_size() const noexcept
186 {
187 return cached_.size();
188 }
189
190 std::size_t cached_block_size() const noexcept
191 {
192 return cached_.top().size;
193 }
194
195 bool take_from_cache(detail::memory_block_stack& used) noexcept
196 {
197 if (cached_.empty())
198 return false;
199 used.steal_top(cached_);
200 return true;
201 }
202
203 template <class BlockAllocator>
204 void do_deallocate_block(BlockAllocator&, detail::memory_block_stack& used) noexcept
205 {
206 cached_.steal_top(used);
207 }
208
209 template <class BlockAllocator>
210 void do_shrink_to_fit(BlockAllocator& alloc) noexcept
211 {
212 detail::memory_block_stack to_dealloc;
213 // pop from cache and push to temporary stack
214 // this reverses the order
215 while (!cached_.empty())
216 to_dealloc.steal_top(cached_);
217 // now dealloc everything
218 while (!to_dealloc.empty())
219 alloc.deallocate_block(to_dealloc.pop());
220 }
221
222 private:
223 detail::memory_block_stack cached_;
224 };
225
226 template <>
227 class memory_arena_cache<uncached_arena>
228 {
229 protected:
230 bool cache_empty() const noexcept
231 {
232 return true;
233 }
234
235 std::size_t cache_size() const noexcept
236 {
237 return 0u;
238 }
239
240 std::size_t cached_block_size() const noexcept
241 {
242 return 0u;
243 }
244
245 bool take_from_cache(detail::memory_block_stack&) noexcept
246 {
247 return false;
248 }
249
250 template <class BlockAllocator>
251 void do_deallocate_block(BlockAllocator& alloc,
252 detail::memory_block_stack& used) noexcept
253 {
254 alloc.deallocate_block(used.pop());
255 }
256
257 template <class BlockAllocator>
258 void do_shrink_to_fit(BlockAllocator&) noexcept
259 {
260 }
261 };
262 } // namespace detail
263
264 /// A memory arena that manages huge memory blocks for a higher-level allocator.
265 /// Some allocators like \ref memory_stack work on huge memory blocks;
266 /// this class manages them for those allocators.
267 /// It uses a \concept{concept_blockallocator,BlockAllocator} for the allocation of those blocks.
268 /// The memory blocks in use are put onto a stack like structure, deallocation will pop from the top,
269 /// so it is only possible to deallocate the last allocated block of the arena.
270 /// By default, blocks are not really deallocated but stored in a cache.
271 /// This can be disabled with the second template parameter,
272 /// passing it \ref uncached_arena (or \c false) disables it,
273 /// \ref cached_arena (or \c true) enables it explicitly.
274 /// \ingroup core
275 template <class BlockAllocator, bool Cached /* = true */>
276 class memory_arena : FOONATHAN_EBO(BlockAllocator),
277 FOONATHAN_EBO(detail::memory_arena_cache<Cached>)
278 {
279 static_assert(is_block_allocator<BlockAllocator>::value,
280 "BlockAllocator is not a BlockAllocator!");
281 using cache = detail::memory_arena_cache<Cached>;
282
283 public:
284 using allocator_type = BlockAllocator;
285 using is_cached = std::integral_constant<bool, Cached>;
286
287 /// \effects Creates it by giving it the size and other arguments for the \concept{concept_blockallocator,BlockAllocator}.
288 /// It forwards these arguments to its constructor.
289 /// \requires \c block_size must be greater than \c 0 and other requirements depending on the \concept{concept_blockallocator,BlockAllocator}.
290 /// \throws Anything thrown by the constructor of the \c BlockAllocator.
291 template <typename... Args>
292 explicit memory_arena(std::size_t block_size, Args&&... args)
293 : allocator_type(block_size, detail::forward<Args>(args)...)
294 {
295 }
296
297         /// \effects Returns all allocated memory blocks back to the \concept{concept_blockallocator,BlockAllocator}.
298 ~memory_arena() noexcept
299 {
300 // clear cache
301 shrink_to_fit();
302 // now deallocate everything
303 while (!used_.empty())
304 allocator_type::deallocate_block(used_.pop());
305 }
306
307 /// @{
308 /// \effects Moves the arena.
309 /// The new arena takes ownership over all the memory blocks from the other arena object,
310 /// which is empty after that.
311 /// This does not invalidate any memory blocks.
312 memory_arena(memory_arena&& other) noexcept
313 : allocator_type(detail::move(other)),
314 cache(detail::move(other)),
315 used_(detail::move(other.used_))
316 {
317 }
318
319 memory_arena& operator=(memory_arena&& other) noexcept
320 {
321 memory_arena tmp(detail::move(other));
322 swap(*this, tmp);
323 return *this;
324 }
325 /// @}
326
327         /// \effects Swaps two memory arena objects.
328 /// This does not invalidate any memory blocks.
329 friend void swap(memory_arena& a, memory_arena& b) noexcept
330 {
331 detail::adl_swap(static_cast<allocator_type&>(a), static_cast<allocator_type&>(b));
332 detail::adl_swap(static_cast<cache&>(a), static_cast<cache&>(b));
333 detail::adl_swap(a.used_, b.used_);
334 }
335
336 /// \effects Allocates a new memory block.
337         /// If caching is enabled, it first uses the cache of previously deallocated blocks;
338         /// if the cache is empty, it allocates a new one.
339 /// \returns The new \ref memory_block.
340 /// \throws Anything thrown by the \concept{concept_blockallocator,BlockAllocator} allocation function.
341 memory_block allocate_block()
342 {
343 if (!this->take_from_cache(used_))
344 used_.push(allocator_type::allocate_block());
345
346 auto block = used_.top();
347 detail::debug_fill_internal(block.memory, block.size, false);
348 return block;
349 }
350
351 /// \returns The current memory block.
352 /// This is the memory block that will be deallocated by the next call to \ref deallocate_block().
353 memory_block current_block() const noexcept
354 {
355 return used_.top();
356 }
357
358 /// \effects Deallocates the current memory block.
359 /// The current memory block is the block on top of the stack of blocks.
360         /// If caching is enabled, the block is not actually deallocated but put into a cache for later reuse;
361 /// use \ref shrink_to_fit() to purge that cache.
362 void deallocate_block() noexcept
363 {
364 auto block = used_.top();
365 detail::debug_fill_internal(block.memory, block.size, true);
366 this->do_deallocate_block(get_allocator(), used_);
367 }
368
369 /// \returns If `ptr` is in memory owned by the arena.
370 bool owns(const void* ptr) const noexcept
371 {
372 return used_.owns(ptr);
373 }
374
375         /// \effects Purges the cache of unused memory blocks by returning them to the \concept{concept_blockallocator,BlockAllocator}.
376         /// The memory blocks will be deallocated in reverse order of allocation.
377 /// Does nothing if caching is disabled.
378 void shrink_to_fit() noexcept
379 {
380 this->do_shrink_to_fit(get_allocator());
381 }
382
383 /// \returns The capacity of the arena, i.e. how many blocks are used and cached.
384 std::size_t capacity() const noexcept
385 {
386 return size() + cache_size();
387 }
388
389         /// \returns The size of the cache, i.e. how many blocks can be allocated without requesting memory from the \concept{concept_blockallocator,BlockAllocator}.
390 std::size_t cache_size() const noexcept
391 {
392 return cache::cache_size();
393 }
394
395 /// \returns The size of the arena, i.e. how many blocks are in use.
396         /// It is always less than or equal to the \ref capacity().
397 std::size_t size() const noexcept
398 {
399 return used_.size();
400 }
401
402 /// \returns The size of the next memory block,
403         /// i.e. the size of the block returned by the next call to \ref allocate_block().
404         /// If there are blocks in the cache, returns the size of the next one.
405 /// Otherwise forwards to the \concept{concept_blockallocator,BlockAllocator} and subtracts an implementation offset.
406 std::size_t next_block_size() const noexcept
407 {
408 return this->cache_empty() ?
409 allocator_type::next_block_size()
410 - detail::memory_block_stack::implementation_offset() :
411 this->cached_block_size();
412 }
413
414 /// \returns A reference of the \concept{concept_blockallocator,BlockAllocator} object.
415 /// \requires It is undefined behavior to move this allocator out into another object.
416 allocator_type& get_allocator() noexcept
417 {
418 return *this;
419 }
420
421 private:
422 detail::memory_block_stack used_;
423 };
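    // Usage sketch (illustrative only, arbitrary sizes; growing_block_allocator is
    // defined below in this header):
    //
    //   memory_arena<growing_block_allocator<>> arena(4096u);
    //   auto block = arena.allocate_block(); // fresh block from the BlockAllocator
    //   arena.deallocate_block();            // cached, not returned to the BlockAllocator
    //   arena.shrink_to_fit();               // purges the cache, actually deallocating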
424
425 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
426 extern template class memory_arena<static_block_allocator, true>;
427 extern template class memory_arena<static_block_allocator, false>;
428 extern template class memory_arena<virtual_block_allocator, true>;
429 extern template class memory_arena<virtual_block_allocator, false>;
430 #endif
431
432 /// A \concept{concept_blockallocator,BlockAllocator} that uses a given \concept{concept_rawallocator,RawAllocator} for allocating the blocks.
433 /// It calls the \c allocate_array() function with a node of size \c 1 and maximum alignment on the used allocator for the block allocation.
434 /// The size of the next memory block will grow by a given factor after each allocation,
435 /// allowing an amortized constant allocation time in the higher level allocator.
436 /// The factor can be given as rational in the template parameter, default is \c 2.
437 /// \ingroup adapter
438 template <class RawAllocator = default_allocator, unsigned Num = 2, unsigned Den = 1>
439 class growing_block_allocator
440 : FOONATHAN_EBO(allocator_traits<RawAllocator>::allocator_type)
441 {
442 static_assert(float(Num) / Den >= 1.0, "invalid growth factor");
443
444 using traits = allocator_traits<RawAllocator>;
445
446 public:
447 using allocator_type = typename traits::allocator_type;
448
449         /// \effects Creates it by giving it the initial block size and the allocator object.
450         /// By default, it uses a default-constructed allocator object; the growth factor is given by the template parameters and defaults to \c 2.
451 /// \requires \c block_size must be greater than 0.
452 explicit growing_block_allocator(std::size_t block_size,
453 allocator_type alloc = allocator_type()) noexcept
454 : allocator_type(detail::move(alloc)), block_size_(block_size)
455 {
456 }
457
458 /// \effects Allocates a new memory block and increases the block size for the next allocation.
459 /// \returns The new \ref memory_block.
460 /// \throws Anything thrown by the \c allocate_array() function of the \concept{concept_rawallocator,RawAllocator}.
461 memory_block allocate_block()
462 {
463 auto memory =
464 traits::allocate_array(get_allocator(), block_size_, 1, detail::max_alignment);
465 memory_block block(memory, block_size_);
466 block_size_ = std::size_t(block_size_ * growth_factor());
467 return block;
468 }
469
470 /// \effects Deallocates a previously allocated memory block.
471 /// This does not decrease the block size.
472 /// \requires \c block must be previously returned by a call to \ref allocate_block().
473 void deallocate_block(memory_block block) noexcept
474 {
475 traits::deallocate_array(get_allocator(), block.memory, block.size, 1,
476 detail::max_alignment);
477 }
478
479 /// \returns The size of the memory block returned by the next call to \ref allocate_block().
480 std::size_t next_block_size() const noexcept
481 {
482 return block_size_;
483 }
484
485 /// \returns A reference to the used \concept{concept_rawallocator,RawAllocator} object.
486 allocator_type& get_allocator() noexcept
487 {
488 return *this;
489 }
490
491 /// \returns The growth factor.
492 static float growth_factor() noexcept
493 {
494 static constexpr auto factor = float(Num) / Den;
495 return factor;
496 }
497
498 private:
499 std::size_t block_size_;
500 };
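        // Growth sketch (assumed figures): with the default factor Num/Den == 2/1 and
        // an initial block size of 1024, successive allocate_block() calls request
        // 1024, 2048, 4096, ... bytes from the RawAllocator.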
501
502 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
503 extern template class growing_block_allocator<>;
504 extern template class memory_arena<growing_block_allocator<>, true>;
505 extern template class memory_arena<growing_block_allocator<>, false>;
506 #endif
507
508 /// A \concept{concept_blockallocator,BlockAllocator} that allows only one block allocation.
509 /// It can be used to prevent higher-level allocators from expanding.
510 /// The one block allocation is performed through the \c allocate_array() function of the given \concept{concept_rawallocator,RawAllocator}.
511 /// \ingroup adapter
512 template <class RawAllocator = default_allocator>
513 class fixed_block_allocator : FOONATHAN_EBO(allocator_traits<RawAllocator>::allocator_type)
514 {
515 using traits = allocator_traits<RawAllocator>;
516
517 public:
518 using allocator_type = typename traits::allocator_type;
519
520 /// \effects Creates it by passing it the size of the block and the allocator object.
521         /// \requires \c block_size must be greater than 0.
522 explicit fixed_block_allocator(std::size_t block_size,
523 allocator_type alloc = allocator_type()) noexcept
524 : allocator_type(detail::move(alloc)), block_size_(block_size)
525 {
526 }
527
528 /// \effects Allocates a new memory block or throws an exception if there was already one allocation.
529 /// \returns The new \ref memory_block.
530         /// \throws Anything thrown by the \c allocate_array() function of the \concept{concept_rawallocator,RawAllocator} or \ref out_of_fixed_memory if this is not the first call.
531 memory_block allocate_block()
532 {
533 if (block_size_)
534 {
535 auto mem = traits::allocate_array(get_allocator(), block_size_, 1,
536 detail::max_alignment);
537 memory_block block(mem, block_size_);
538 block_size_ = 0u;
539 return block;
540 }
541 FOONATHAN_THROW(out_of_fixed_memory(info(), block_size_));
542 }
543
544 /// \effects Deallocates the previously allocated memory block.
545         /// This resets the allocator, so \ref allocate_block() can be called again.
546 void deallocate_block(memory_block block) noexcept
547 {
548 detail::debug_check_pointer([&] { return block_size_ == 0u; }, info(),
549 block.memory);
550 traits::deallocate_array(get_allocator(), block.memory, block.size, 1,
551 detail::max_alignment);
552 block_size_ = block.size;
553 }
554
555 /// \returns The size of the next block which is either the initial size or \c 0.
556 std::size_t next_block_size() const noexcept
557 {
558 return block_size_;
559 }
560
561 /// \returns A reference to the used \concept{concept_rawallocator,RawAllocator} object.
562 allocator_type& get_allocator() noexcept
563 {
564 return *this;
565 }
566
567 private:
568 allocator_info info() noexcept
569 {
570 return {FOONATHAN_MEMORY_LOG_PREFIX "::fixed_block_allocator", this};
571 }
572
573 std::size_t block_size_;
574 };
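        // Sketch (hypothetical usage, memory_stack lives in memory_stack.hpp):
        // cap a memory_stack at one 4KiB block, so exhausting it throws
        // out_of_fixed_memory instead of growing:
        //
        //   memory_stack<fixed_block_allocator<>> stack(4096u);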
575
576 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
577 extern template class fixed_block_allocator<>;
578 extern template class memory_arena<fixed_block_allocator<>, true>;
579 extern template class memory_arena<fixed_block_allocator<>, false>;
580 #endif
581
582 namespace detail
583 {
584 template <class RawAlloc>
585 using default_block_wrapper = growing_block_allocator<RawAlloc>;
586
587 template <template <class...> class Wrapper, class BlockAllocator, typename... Args>
588 BlockAllocator make_block_allocator(std::true_type, std::size_t block_size,
589 Args&&... args)
590 {
591 return BlockAllocator(block_size, detail::forward<Args>(args)...);
592 }
593
594 template <template <class...> class Wrapper, class RawAlloc>
595 auto make_block_allocator(std::false_type, std::size_t block_size,
596 RawAlloc alloc = RawAlloc()) -> Wrapper<RawAlloc>
597 {
598 return Wrapper<RawAlloc>(block_size, detail::move(alloc));
599 }
600 } // namespace detail
601
602 /// Takes either a \concept{concept_blockallocator,BlockAllocator} or a \concept{concept_rawallocator,RawAllocator}.
603     /// In the first case it simply aliases the type unchanged, in the second it aliases \ref growing_block_allocator (or the template given in `BlockAllocator`) instantiated with the \concept{concept_rawallocator,RawAllocator}.
604 /// Using this allows passing normal \concept{concept_rawallocator,RawAllocators} as \concept{concept_blockallocator,BlockAllocators}.
605 /// \ingroup core
606 template <class BlockOrRawAllocator,
607 template <typename...> class BlockAllocator = detail::default_block_wrapper>
608 using make_block_allocator_t = FOONATHAN_IMPL_DEFINED(
609 typename std::conditional<is_block_allocator<BlockOrRawAllocator>::value,
610 BlockOrRawAllocator,
611 BlockAllocator<BlockOrRawAllocator>>::type);
612
613 /// @{
614     /// Helper function to make a \concept{concept_blockallocator,BlockAllocator}.
615 /// \returns A \concept{concept_blockallocator,BlockAllocator} of the given type created with the given arguments.
616 /// \requires Same requirements as the constructor.
617 /// \ingroup core
618 template <class BlockOrRawAllocator, typename... Args>
619 make_block_allocator_t<BlockOrRawAllocator> make_block_allocator(std::size_t block_size,
620 Args&&... args)
621 {
622 return detail::make_block_allocator<
623 detail::default_block_wrapper,
624 BlockOrRawAllocator>(is_block_allocator<BlockOrRawAllocator>{}, block_size,
625 detail::forward<Args>(args)...);
626 }
627
628 template <template <class...> class BlockAllocator, class BlockOrRawAllocator,
629 typename... Args>
630 make_block_allocator_t<BlockOrRawAllocator, BlockAllocator> make_block_allocator(
631 std::size_t block_size, Args&&... args)
632 {
633 return detail::make_block_allocator<
634 BlockAllocator, BlockOrRawAllocator>(is_block_allocator<BlockOrRawAllocator>{},
635 block_size, detail::forward<Args>(args)...);
636 }
637 /// @}
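        // Sketch of what the alias resolves to (assumed instantiations):
        //
        //   make_block_allocator_t<fixed_block_allocator<>> // already a BlockAllocator,
        //       -> fixed_block_allocator<>                  // aliased unchanged
        //   make_block_allocator_t<default_allocator>       // a RawAllocator,
        //       -> growing_block_allocator<default_allocator>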
638
639 namespace literals
640 {
641 /// Syntax sugar to express sizes with unit prefixes.
642         /// \returns The number of bytes corresponding to `value` in the given unit.
643 /// \ingroup core
644 /// @{
645 constexpr std::size_t operator"" _KiB(unsigned long long value) noexcept
646 {
647 return std::size_t(value * 1024);
648 }
649
650 constexpr std::size_t operator"" _KB(unsigned long long value) noexcept
651 {
652 return std::size_t(value * 1000);
653 }
654
655 constexpr std::size_t operator"" _MiB(unsigned long long value) noexcept
656 {
657 return std::size_t(value * 1024 * 1024);
658 }
659
660 constexpr std::size_t operator"" _MB(unsigned long long value) noexcept
661 {
662 return std::size_t(value * 1000 * 1000);
663 }
664
665 constexpr std::size_t operator"" _GiB(unsigned long long value) noexcept
666 {
667 return std::size_t(value * 1024 * 1024 * 1024);
668 }
669
670 constexpr std::size_t operator"" _GB(unsigned long long value) noexcept
671 {
672 return std::size_t(value * 1000 * 1000 * 1000);
673 }
674 } // namespace literals
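            // Usage sketch (illustrative): the literals are plain compile-time conversions.
            //
            //   using namespace memory::literals;
            //   static_assert(4_KiB == 4096u, "");
            //   static_assert(1_MB == 1000000u, "");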
675 } // namespace memory
676 } // namespace foonathan
677
678 #endif // FOONATHAN_MEMORY_MEMORY_ARENA_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_MEMORY_POOL_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_MEMORY_POOL_HPP_INCLUDED
6
7 // Inform that foonathan::memory::memory_pool::min_block_size API is available
8 #define FOONATHAN_MEMORY_MEMORY_POOL_HAS_MIN_BLOCK_SIZE
9
10 /// \file
11 /// Class \ref foonathan::memory::memory_pool and its \ref foonathan::memory::allocator_traits specialization.
12
13 #include <type_traits>
14
15 #include "detail/align.hpp"
16 #include "detail/debug_helpers.hpp"
17 #include "detail/assert.hpp"
18 #include "config.hpp"
19 #include "error.hpp"
20 #include "memory_arena.hpp"
21 #include "memory_pool_type.hpp"
22
23 namespace foonathan
24 {
25 namespace memory
26 {
27 namespace detail
28 {
29 struct memory_pool_leak_handler
30 {
31 void operator()(std::ptrdiff_t amount);
32 };
33 } // namespace detail
34
35 /// A stateful \concept{concept_rawallocator,RawAllocator} that manages \concept{concept_node,nodes} of fixed size.
36 /// It uses a \ref memory_arena with a given \c BlockOrRawAllocator defaulting to \ref growing_block_allocator,
37 /// subdivides them in small nodes of given size and puts them onto a free list.
38 /// Allocation and deallocation simply remove or add nodes from this list and are thus fast.
39 /// The way the list is maintained can be controlled via the \c PoolType
40 /// which is either \ref node_pool, \ref array_pool or \ref small_node_pool.<br>
41 /// This kind of allocator is ideal for fixed size allocations and deallocations in any order,
42 /// for example in a node based container like \c std::list.
43 /// It is not so good for different allocation sizes and has some drawbacks for arrays
44 /// as described in \ref memory_pool_type.hpp.
45 /// \ingroup allocator
46 template <typename PoolType = node_pool, class BlockOrRawAllocator = default_allocator>
47 class memory_pool
48 : FOONATHAN_EBO(detail::default_leak_checker<detail::memory_pool_leak_handler>)
49 {
50 using free_list = typename PoolType::type;
51 using leak_checker = detail::default_leak_checker<detail::memory_pool_leak_handler>;
52
53 public:
54 using allocator_type = make_block_allocator_t<BlockOrRawAllocator>;
55 using pool_type = PoolType;
56
57 static constexpr std::size_t min_node_size =
58 FOONATHAN_IMPL_DEFINED(free_list::min_element_size);
59
60             /// \returns The minimum block size required for a certain number of \concept{concept_node,nodes}.
61 /// \requires \c node_size must be a valid \concept{concept_node,node size}
62 /// and \c number_of_nodes must be a non-zero value.
63 static constexpr std::size_t min_block_size(std::size_t node_size,
64 std::size_t number_of_nodes) noexcept
65 {
66 return detail::memory_block_stack::implementation_offset()
67 + number_of_nodes
68 * (((node_size > min_node_size) ? node_size : min_node_size)
69 + (detail::debug_fence_size ? 2 * detail::max_alignment : 0));
70 }
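               // e.g. (hypothetical figures, release mode where debug_fence_size == 0,
               // and a node size of 16 that is not below min_node_size):
               // min_block_size(16, 100) == implementation_offset() + 100 * 16 bytes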
71
72 /// \effects Creates it by specifying the size each \concept{concept_node,node} will have,
73 /// the initial block size for the arena and other constructor arguments for the \concept{concept_blockallocator,BlockAllocator}.
74 /// If the \c node_size is less than the \c min_node_size, the \c min_node_size will be the actual node size.
75 /// It will allocate an initial memory block with given size from the \concept{concept_blockallocator,BlockAllocator}
76             /// and put it onto the free list.
77 /// \requires \c node_size must be a valid \concept{concept_node,node size}
78 /// and \c block_size must be a non-zero value.
79 template <typename... Args>
80 memory_pool(std::size_t node_size, std::size_t block_size, Args&&... args)
81 : arena_(block_size, detail::forward<Args>(args)...), free_list_(node_size)
82 {
83 allocate_block();
84 }
85
86             /// \effects Destroys the \ref memory_pool by returning all memory blocks to the \concept{concept_blockallocator,BlockAllocator},
87             /// regardless of whether they were properly deallocated.
88 ~memory_pool() noexcept {}
89
90 /// @{
91 /// \effects Moving a \ref memory_pool object transfers ownership over the free list,
92 /// i.e. the moved from pool is completely empty and the new one has all its memory.
93 /// That means that it is not allowed to call \ref deallocate_node() on a moved-from allocator
94 /// even when passing it memory that was previously allocated by this object.
95 memory_pool(memory_pool&& other) noexcept
96 : leak_checker(detail::move(other)),
97 arena_(detail::move(other.arena_)),
98 free_list_(detail::move(other.free_list_))
99 {
100 }
101
102 memory_pool& operator=(memory_pool&& other) noexcept
103 {
104 leak_checker::operator=(detail::move(other));
105 arena_ = detail::move(other.arena_);
106 free_list_ = detail::move(other.free_list_);
107 return *this;
108 }
109 /// @}
110
111 /// \effects Allocates a single \concept{concept_node,node} by removing it from the free list.
112 /// If the free list is empty, a new memory block will be allocated from the arena and put onto it.
113 /// The new block size will be \ref next_capacity() big.
114             /// \returns A node of size \ref node_size() suitably aligned,
115 /// i.e. suitable for any type where <tt>sizeof(T) < node_size()</tt>.
116 /// \throws Anything thrown by the used \concept{concept_blockallocator,BlockAllocator}'s allocation function if a growth is needed.
117 void* allocate_node()
118 {
119 if (free_list_.empty())
120 allocate_block();
121 FOONATHAN_MEMORY_ASSERT(!free_list_.empty());
122 return free_list_.allocate();
123 }
124
125 /// \effects Allocates a single \concept{concept_node,node} similar to \ref allocate_node().
126 /// But if the free list is empty, a new block will *not* be allocated.
127             /// \returns A suitably aligned node of size \ref node_size() or `nullptr`.
128 void* try_allocate_node() noexcept
129 {
130 return free_list_.empty() ? nullptr : free_list_.allocate();
131 }
132
133             /// \effects Allocates an \concept{concept_array,array} of nodes by searching for \c n contiguous nodes on the list and removing them.
134             /// Depending on the \c PoolType this can be a slow operation or not allowed at all.
135             /// This can sometimes lead to a growth, even if technically there is enough contiguous memory on the free list.
136             /// \returns An array of \c n nodes of size \ref node_size() suitably aligned.
137 /// \throws Anything thrown by the used \concept{concept_blockallocator,BlockAllocator}'s allocation function if a growth is needed,
138 /// or \ref bad_array_size if <tt>n * node_size()</tt> is too big.
139 /// \requires \c n must be valid \concept{concept_array,array count}.
140 void* allocate_array(std::size_t n)
141 {
142 detail::check_allocation_size<bad_array_size>(
143 n * node_size(), [&] { return pool_type::value ? next_capacity() : 0; },
144 info());
145 return allocate_array(n, node_size());
146 }
147
148 /// \effects Allocates an \concept{concept_array,array} of nodes similar to \ref allocate_array().
149 /// But it will never allocate a new memory block.
150             /// \returns An array of \c n nodes of size \ref node_size() suitably aligned
151 /// or `nullptr`.
152 void* try_allocate_array(std::size_t n) noexcept
153 {
154 return try_allocate_array(n, node_size());
155 }
156
157 /// \effects Deallocates a single \concept{concept_node,node} by putting it back onto the free list.
158 /// \requires \c ptr must be a result from a previous call to \ref allocate_node() on the same free list,
159 /// i.e. either this allocator object or a new object created by moving this to it.
160 void deallocate_node(void* ptr) noexcept
161 {
162 free_list_.deallocate(ptr);
163 }
164
165             /// \effects Deallocates a single \concept{concept_node,node} that does not need to be a result of a previous call to \ref allocate_node().
166 /// \returns `true` if the node could be deallocated, `false` otherwise.
167 /// \note Some free list implementations can deallocate any memory,
168             /// no matter where it comes from.
169 bool try_deallocate_node(void* ptr) noexcept
170 {
171 if (!arena_.owns(ptr))
172 return false;
173 free_list_.deallocate(ptr);
174 return true;
175 }
176
177 /// \effects Deallocates an \concept{concept_array,array} by putting it back onto the free list.
178 /// \requires \c ptr must be a result from a previous call to \ref allocate_array() with the same \c n on the same free list,
179 /// i.e. either this allocator object or a new object created by moving this to it.
180 void deallocate_array(void* ptr, std::size_t n) noexcept
181 {
182 FOONATHAN_MEMORY_ASSERT_MSG(pool_type::value, "does not support array allocations");
183 free_list_.deallocate(ptr, n * node_size());
184 }
185
186             /// \effects Deallocates an \concept{concept_array,array} that does not need to be a result of a previous call to \ref allocate_array().
187 /// \returns `true` if the node could be deallocated, `false` otherwise.
188 /// \note Some free list implementations can deallocate any memory,
189             /// no matter where it comes from.
190 bool try_deallocate_array(void* ptr, std::size_t n) noexcept
191 {
192 return try_deallocate_array(ptr, n, node_size());
193 }
194
195 /// \returns The size of each \concept{concept_node,node} in the pool,
196 /// this is either the same value as in the constructor or \c min_node_size if the value was too small.
197 std::size_t node_size() const noexcept
198 {
199 return free_list_.node_size();
200 }
201
202             /// \returns The total amount of bytes remaining on the free list.
203             /// Divide it by \ref node_size() to get the number of nodes that can be allocated without growing the arena.
204             /// \note Array allocations may lead to a growth even if \ref capacity_left() is big enough.
205 std::size_t capacity_left() const noexcept
206 {
207 return free_list_.capacity() * node_size();
208 }
209
210 /// \returns The size of the next memory block after the free list gets empty and the arena grows.
211 /// \ref capacity_left() will increase by this amount.
212 /// \note Due to fence memory in debug mode this cannot be just divided by the \ref node_size() to get the number of nodes.
213 std::size_t next_capacity() const noexcept
214 {
215 return free_list_.usable_size(arena_.next_block_size());
216 }
217
218 /// \returns A reference to the \concept{concept_blockallocator,BlockAllocator} used for managing the arena.
219 /// \requires It is undefined behavior to move this allocator out into another object.
220 allocator_type& get_allocator() noexcept
221 {
222 return arena_.get_allocator();
223 }
224
225 private:
226 allocator_info info() const noexcept
227 {
228 return {FOONATHAN_MEMORY_LOG_PREFIX "::memory_pool", this};
229 }
230
231 void allocate_block()
232 {
233 auto mem = arena_.allocate_block();
234 free_list_.insert(static_cast<char*>(mem.memory), mem.size);
235 }
236
237 void* allocate_array(std::size_t n, std::size_t node_size)
238 {
239 auto mem = free_list_.empty() ? nullptr : free_list_.allocate(n * node_size);
240 if (!mem)
241 {
242 allocate_block();
243 mem = free_list_.allocate(n * node_size);
244 if (!mem)
245 FOONATHAN_THROW(bad_array_size(info(), n * node_size, capacity_left()));
246 }
247 return mem;
248 }
249
250 void* try_allocate_array(std::size_t n, std::size_t node_size) noexcept
251 {
252 return !pool_type::value || free_list_.empty() ? nullptr :
253 free_list_.allocate(n * node_size);
254 }
255
256 bool try_deallocate_array(void* ptr, std::size_t n, std::size_t node_size) noexcept
257 {
258 if (!pool_type::value || !arena_.owns(ptr))
259 return false;
260 free_list_.deallocate(ptr, n * node_size);
261 return true;
262 }
263
264 memory_arena<allocator_type, false> arena_;
265 free_list free_list_;
266
267 friend allocator_traits<memory_pool<PoolType, BlockOrRawAllocator>>;
268 friend composable_allocator_traits<memory_pool<PoolType, BlockOrRawAllocator>>;
269 };
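        // Usage sketch (illustrative sizes): a pool of 64-byte nodes carved out of
        // 4KiB arena blocks.
        //
        //   memory_pool<> pool(64u, 4096u);
        //   void* node = pool.allocate_node(); // O(1) pop from the free list
        //   pool.deallocate_node(node);        // O(1) push back onto it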
270
271 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
272 extern template class memory_pool<node_pool>;
273 extern template class memory_pool<array_pool>;
274 extern template class memory_pool<small_node_pool>;
275 #endif
276
277 template <class Type, class Alloc>
278 constexpr std::size_t memory_pool<Type, Alloc>::min_node_size;
279
280 /// Specialization of the \ref allocator_traits for \ref memory_pool classes.
281 /// \note It is not allowed to mix calls through the specialization and through the member functions,
282 /// i.e. \ref memory_pool::allocate_node() and this \c allocate_node().
283 /// \ingroup allocator
284 template <typename PoolType, class ImplRawAllocator>
285 class allocator_traits<memory_pool<PoolType, ImplRawAllocator>>
286 {
287 public:
288 using allocator_type = memory_pool<PoolType, ImplRawAllocator>;
289 using is_stateful = std::true_type;
290
291 /// \returns The result of \ref memory_pool::allocate_node().
292 /// \throws Anything thrown by the pool allocation function
293 /// or a \ref bad_allocation_size exception.
294 static void* allocate_node(allocator_type& state, std::size_t size,
295 std::size_t alignment)
296 {
297 detail::check_allocation_size<bad_node_size>(size, max_node_size(state),
298 state.info());
299 detail::check_allocation_size<bad_alignment>(
300 alignment, [&] { return max_alignment(state); }, state.info());
301 auto mem = state.allocate_node();
302 state.on_allocate(size);
303 return mem;
304 }
305
306 /// \effects Forwards to \ref memory_pool::allocate_array()
307             /// with the number of nodes adjusted to be the minimum
308             /// if \c size is less than the \ref memory_pool::node_size().
309 /// \returns A \concept{concept_array,array} with specified properties.
310 /// \requires The \ref memory_pool has to support array allocations.
311 /// \throws Anything thrown by the pool allocation function.
312 static void* allocate_array(allocator_type& state, std::size_t count, std::size_t size,
313 std::size_t alignment)
314 {
315 detail::check_allocation_size<bad_node_size>(size, max_node_size(state),
316 state.info());
317 detail::check_allocation_size<bad_alignment>(
318 alignment, [&] { return max_alignment(state); }, state.info());
319 detail::check_allocation_size<bad_array_size>(count * size, max_array_size(state),
320 state.info());
321 auto mem = state.allocate_array(count, size);
322 state.on_allocate(count * size);
323 return mem;
324 }
325
326 /// \effects Just forwards to \ref memory_pool::deallocate_node().
327 static void deallocate_node(allocator_type& state, void* node, std::size_t size,
328 std::size_t) noexcept
329 {
330 state.deallocate_node(node);
331 state.on_deallocate(size);
332 }
333
334 /// \effects Forwards to \ref memory_pool::deallocate_array() with the same size adjustment.
335 static void deallocate_array(allocator_type& state, void* array, std::size_t count,
336 std::size_t size, std::size_t) noexcept
337 {
338 state.free_list_.deallocate(array, count * size);
339 state.on_deallocate(count * size);
340 }
341
342 /// \returns The maximum size of each node which is \ref memory_pool::node_size().
343 static std::size_t max_node_size(const allocator_type& state) noexcept
344 {
345 return state.node_size();
346 }
347
348 /// \returns An upper bound on the maximum array size which is \ref memory_pool::next_capacity().
349 static std::size_t max_array_size(const allocator_type& state) noexcept
350 {
351 return state.next_capacity();
352 }
353
354             /// \returns The maximum alignment, which is the next bigger power of two if the node size is less than \c alignof(std::max_align_t),
355             /// or \c alignof(std::max_align_t) itself otherwise.
356 static std::size_t max_alignment(const allocator_type& state) noexcept
357 {
358 return state.free_list_.alignment();
359 }
360 };
361
362 /// Specialization of the \ref composable_allocator_traits for \ref memory_pool classes.
363 /// \ingroup allocator
364 template <typename PoolType, class BlockOrRawAllocator>
365 class composable_allocator_traits<memory_pool<PoolType, BlockOrRawAllocator>>
366 {
367 using traits = allocator_traits<memory_pool<PoolType, BlockOrRawAllocator>>;
368
369 public:
370 using allocator_type = memory_pool<PoolType, BlockOrRawAllocator>;
371
372 /// \returns The result of \ref memory_pool::try_allocate_node()
373 /// or `nullptr` if the allocation size was too big.
374 static void* try_allocate_node(allocator_type& state, std::size_t size,
375 std::size_t alignment) noexcept
376 {
377 if (size > traits::max_node_size(state) || alignment > traits::max_alignment(state))
378 return nullptr;
379 return state.try_allocate_node();
380 }
381
382 /// \effects Forwards to \ref memory_pool::try_allocate_array()
383             /// with the number of nodes adjusted to be the minimum
384             /// if \c size is less than the \ref memory_pool::node_size().
385 /// \returns A \concept{concept_array,array} with specified properties
386 /// or `nullptr` if it was unable to allocate.
387 static void* try_allocate_array(allocator_type& state, std::size_t count,
388 std::size_t size, std::size_t alignment) noexcept
389 {
390 if (size > traits::max_node_size(state)
391 || count * size > traits::max_array_size(state)
392 || alignment > traits::max_alignment(state))
393 return nullptr;
394 return state.try_allocate_array(count, size);
395 }
396
397 /// \effects Just forwards to \ref memory_pool::try_deallocate_node().
398 /// \returns Whether the deallocation was successful.
399 static bool try_deallocate_node(allocator_type& state, void* node, std::size_t size,
400 std::size_t alignment) noexcept
401 {
402 if (size > traits::max_node_size(state) || alignment > traits::max_alignment(state))
403 return false;
404 return state.try_deallocate_node(node);
405 }
406
407 /// \effects Forwards to \ref memory_pool::deallocate_array() with the same size adjustment.
408 /// \returns Whether the deallocation was successful.
409 static bool try_deallocate_array(allocator_type& state, void* array, std::size_t count,
410 std::size_t size, std::size_t alignment) noexcept
411 {
412 if (size > traits::max_node_size(state)
413 || count * size > traits::max_array_size(state)
414 || alignment > traits::max_alignment(state))
415 return false;
416 return state.try_deallocate_array(array, count, size);
417 }
418 };
419
420 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
421 extern template class allocator_traits<memory_pool<node_pool>>;
422 extern template class allocator_traits<memory_pool<array_pool>>;
423 extern template class allocator_traits<memory_pool<small_node_pool>>;
424
425 extern template class composable_allocator_traits<memory_pool<node_pool>>;
426 extern template class composable_allocator_traits<memory_pool<array_pool>>;
427 extern template class composable_allocator_traits<memory_pool<small_node_pool>>;
428 #endif
429 } // namespace memory
430 } // namespace foonathan
431
432 #endif // FOONATHAN_MEMORY_MEMORY_POOL_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_MEMORY_POOL_COLLECTION_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_MEMORY_POOL_COLLECTION_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::memory_pool_collection and related classes.
9
10 #include <type_traits>
11
12 #include "detail/align.hpp"
13 #include "detail/assert.hpp"
14 #include "detail/memory_stack.hpp"
15 #include "detail/free_list_array.hpp"
16 #include "config.hpp"
17 #include "debugging.hpp"
18 #include "error.hpp"
19 #include "memory_arena.hpp"
20 #include "memory_pool_type.hpp"
21
22 namespace foonathan
23 {
24 namespace memory
25 {
26 namespace detail
27 {
28 struct memory_pool_collection_leak_handler
29 {
30 void operator()(std::ptrdiff_t amount);
31 };
32 } // namespace detail
33
34 /// A \c BucketDistribution for \ref memory_pool_collection defining that there is a bucket, i.e. pool, for each size.
35         /// That means that for each possible size up to an upper bound there will be a separate free list.
36 /// Allocating a node will not waste any memory.
37 /// \ingroup allocator
38 struct identity_buckets
39 {
40 using type = detail::identity_access_policy;
41 };
42
43 /// A \c BucketDistribution for \ref memory_pool_collection defining that there is a bucket, i.e. pool, for each power of two.
44 /// That means for each power of two up to an upper bound there will be a separate free list.
45 /// Allocating a node will only waste half of the memory.
46 /// \ingroup allocator
47 struct log2_buckets
48 {
49 using type = detail::log2_access_policy;
50 };
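        // e.g. under log2_buckets a 20-byte node is served from the 32-byte free list
        // (12 bytes unused), while identity_buckets would keep a dedicated 20-byte list.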
51
52 /// A stateful \concept{concept_rawallocator,RawAllocator} that behaves as a collection of multiple \ref memory_pool objects.
53 /// It maintains a list of multiple free lists, whose types are controlled via the \c PoolType tags defined in \ref memory_pool_type.hpp,
54 /// each of a different size as defined in the \c BucketDistribution (\ref identity_buckets or \ref log2_buckets).
55 /// Allocating a node of given size will use the appropriate free list.<br>
56 /// This allocator is ideal for \concept{concept_node,node} allocations in any order but with a predefined set of sizes,
57 /// not only one size like \ref memory_pool.
58 /// \ingroup allocator
59 template <class PoolType, class BucketDistribution,
60 class BlockOrRawAllocator = default_allocator>
61 class memory_pool_collection
62 : FOONATHAN_EBO(detail::default_leak_checker<detail::memory_pool_collection_leak_handler>)
63 {
64 using free_list_array =
65 detail::free_list_array<typename PoolType::type, typename BucketDistribution::type>;
66 using leak_checker =
67 detail::default_leak_checker<detail::memory_pool_collection_leak_handler>;
68
69 public:
70 using allocator_type = make_block_allocator_t<BlockOrRawAllocator>;
71 using pool_type = PoolType;
72 using bucket_distribution = BucketDistribution;
73
74 /// \effects Creates it by giving it the maximum node size it should be able to allocate,
75 /// the size of the initial memory block and other constructor arguments for the \concept{concept_blockallocator,BlockAllocator}.
76 /// The \c BucketDistribution controls how many free lists are created,
77             /// but unlike in \ref memory_pool all free lists are initially empty and the first memory block is only queued.
78 /// \requires \c max_node_size must be a valid \concept{concept_node,node} size
79 /// and \c block_size must be non-zero.
80 template <typename... Args>
81 memory_pool_collection(std::size_t max_node_size, std::size_t block_size,
82 Args&&... args)
83 : arena_(block_size, detail::forward<Args>(args)...),
84 stack_(allocate_block()),
85 pools_(stack_, block_end(), max_node_size)
86 {
87 }
88
89             /// \effects Destroys the \ref memory_pool_collection by returning all memory blocks to the \concept{concept_blockallocator,BlockAllocator},
90             /// regardless of whether they were properly deallocated.
91 ~memory_pool_collection() noexcept = default;
92
93 /// @{
94 /// \effects Moving a \ref memory_pool_collection object transfers ownership over the free lists,
95 /// i.e. the moved from pool is completely empty and the new one has all its memory.
96 /// That means that it is not allowed to call \ref deallocate_node() on a moved-from allocator
97 /// even when passing it memory that was previously allocated by this object.
98 memory_pool_collection(memory_pool_collection&& other) noexcept
99 : leak_checker(detail::move(other)),
100 arena_(detail::move(other.arena_)),
101 stack_(detail::move(other.stack_)),
102 pools_(detail::move(other.pools_))
103 {
104 }
105
106 memory_pool_collection& operator=(memory_pool_collection&& other) noexcept
107 {
108 leak_checker::operator=(detail::move(other));
109 arena_ = detail::move(other.arena_);
110 stack_ = detail::move(other.stack_);
111 pools_ = detail::move(other.pools_);
112 return *this;
113 }
114 /// @}
115
116 /// \effects Allocates a \concept{concept_node,node} of given size.
117 /// It first finds the appropriate free list as defined in the \c BucketDistribution.
118             /// If it is empty, it will use an implementation-defined amount of memory from the arena
119             /// and insert it into the free list.
120 /// If the arena is empty too, it will request a new memory block from the \concept{concept_blockallocator,BlockAllocator}
121 /// of size \ref next_capacity() and puts part of it onto this free list.
122 /// Then it removes a node from it.
123             /// \returns A \concept{concept_node,node} of given size suitably aligned,
124 /// i.e. suitable for any type where <tt>sizeof(T) < node_size</tt>.
125 /// \throws Anything thrown by the \concept{concept_blockallocator,BlockAllocator} if a growth is needed or a \ref bad_node_size exception if the node size is too big.
126 void* allocate_node(std::size_t node_size)
127 {
128 detail::check_allocation_size<bad_node_size>(
129 node_size, [&] { return max_node_size(); }, info());
130 auto& pool = pools_.get(node_size);
131 if (pool.empty())
132 {
133 auto block = reserve_memory(pool, def_capacity());
134 pool.insert(block.memory, block.size);
135 }
136
137 auto mem = pool.allocate();
138 FOONATHAN_MEMORY_ASSERT(mem);
139 return mem;
140 }
141
142 /// \effects Allocates a \concept{concept_node,node} of given size.
143 /// It is similar to \ref allocate_node() but will return `nullptr` on any failure,
144             /// instead of growing the arena and possibly throwing.
145             /// \returns A \concept{concept_node,node} of given size suitably aligned
146 /// or `nullptr` in case of failure.
147 void* try_allocate_node(std::size_t node_size) noexcept
148 {
149 if (node_size > max_node_size())
150 return nullptr;
151 auto& pool = pools_.get(node_size);
152 if (pool.empty())
153 {
154 try_reserve_memory(pool, def_capacity());
155 return pool.empty() ? nullptr : pool.allocate();
156 }
157 else
158 return pool.allocate();
159 }
160
161             /// \effects Allocates an \concept{concept_array,array} of nodes by searching for \c n contiguous nodes on the appropriate free list and removing them.
162             /// Depending on the \c PoolType this can be a slow operation or not allowed at all.
163             /// This can sometimes lead to a growth, even if technically there is enough contiguous memory on the free list.
164 /// Otherwise has the same behavior as \ref allocate_node().
165             /// \returns An array of \c n nodes of size \c node_size suitably aligned.
166 /// \throws Anything thrown by the used \concept{concept_blockallocator,BlockAllocator}'s allocation function if a growth is needed,
167 /// or a \ref bad_allocation_size exception.
168 /// \requires \c count must be valid \concept{concept_array,array count} and
169 /// \c node_size must be valid \concept{concept_node,node size}.
170 void* allocate_array(std::size_t count, std::size_t node_size)
171 {
172 detail::check_allocation_size<bad_node_size>(
173 node_size, [&] { return max_node_size(); }, info());
174
175 auto& pool = pools_.get(node_size);
176
177 // try allocating if not empty
178 // for pools without array allocation support, allocate() will always return nullptr
179 auto mem = pool.empty() ? nullptr : pool.allocate(count * node_size);
180 if (!mem)
181 {
182 // use stack for allocation
183 detail::check_allocation_size<bad_array_size>(
184 count * node_size, [&] { return next_capacity() - pool.alignment() + 1; },
185 info());
186 mem = reserve_memory(pool, count * node_size).memory;
187 FOONATHAN_MEMORY_ASSERT(mem);
188 }
189
190 return mem;
191 }
192
193             /// \effects Allocates an \concept{concept_array,array} of given size.
194             /// It is similar to \ref allocate_array() but will return `nullptr` on any failure,
195             /// instead of growing the arena and possibly throwing.
196             /// \returns An \concept{concept_array,array} of given size suitably aligned
197 /// or `nullptr` in case of failure.
198 void* try_allocate_array(std::size_t count, std::size_t node_size) noexcept
199 {
200 if (!pool_type::value || node_size > max_node_size())
201 return nullptr;
202 auto& pool = pools_.get(node_size);
203 if (pool.empty())
204 {
205 try_reserve_memory(pool, def_capacity());
206 return pool.empty() ? nullptr : pool.allocate(count * node_size);
207 }
208 else
209 return pool.allocate(count * node_size);
210 }
211
212 /// \effects Deallocates a \concept{concept_node,node} by putting it back onto the appropriate free list.
213 /// \requires \c ptr must be a result from a previous call to \ref allocate_node() with the same size on the same free list,
214 /// i.e. either this allocator object or a new object created by moving this to it.
215 void deallocate_node(void* ptr, std::size_t node_size) noexcept
216 {
217 pools_.get(node_size).deallocate(ptr);
218 }
219
220 /// \effects Deallocates a \concept{concept_node,node} similar to \ref deallocate_node().
221 /// But it checks if it can deallocate this memory.
222 /// \returns `true` if the node could be deallocated,
223 /// `false` otherwise.
224 bool try_deallocate_node(void* ptr, std::size_t node_size) noexcept
225 {
226 if (node_size > max_node_size() || !arena_.owns(ptr))
227 return false;
228 pools_.get(node_size).deallocate(ptr);
229 return true;
230 }
231
232 /// \effects Deallocates an \concept{concept_array,array} by putting it back onto the free list.
233 /// \requires \c ptr must be a result from a previous call to \ref allocate_array() with the same sizes on the same free list,
234 /// i.e. either this allocator object or a new object created by moving this to it.
235 void deallocate_array(void* ptr, std::size_t count, std::size_t node_size) noexcept
236 {
237 pools_.get(node_size).deallocate(ptr, count * node_size);
238 }
239
240             /// \effects Deallocates an \concept{concept_array,array} similar to \ref deallocate_array().
241 /// But it checks if it can deallocate this memory.
242 /// \returns `true` if the array could be deallocated,
243 /// `false` otherwise.
244 bool try_deallocate_array(void* ptr, std::size_t count, std::size_t node_size) noexcept
245 {
246 if (!pool_type::value || node_size > max_node_size() || !arena_.owns(ptr))
247 return false;
248 pools_.get(node_size).deallocate(ptr, count * node_size);
249 return true;
250 }
251
252 /// \effects Inserts more memory on the free list for nodes of given size.
253             /// It will try to put \c capacity bytes from the arena onto the free list defined over the \c BucketDistribution;
254 /// if the arena is empty, a new memory block is requested from the \concept{concept_blockallocator,BlockAllocator}
255 /// and it will be used.
256 /// \throws Anything thrown by the \concept{concept_blockallocator,BlockAllocator} if a growth is needed.
257             /// \requires \c node_size must be a valid \concept{concept_node,node size} less than or equal to \ref max_node_size(),
258             /// and \c capacity must be less than \ref next_capacity().
259 void reserve(std::size_t node_size, std::size_t capacity)
260 {
261 FOONATHAN_MEMORY_ASSERT_MSG(node_size <= max_node_size(), "node_size too big");
262 auto& pool = pools_.get(node_size);
263 reserve_memory(pool, capacity);
264 }
265
266             /// \returns The maximum node size for which there is a free list.
267 /// This is the value passed to it in the constructor.
268 std::size_t max_node_size() const noexcept
269 {
270 return pools_.max_node_size();
271 }
272
273             /// \returns The number of nodes available in the free list for nodes of given size
274 /// as defined over the \c BucketDistribution.
275 /// This is the number of nodes that can be allocated without the free list requesting more memory from the arena.
276 /// \note Array allocations may lead to a growth even if the capacity_left is big enough.
277 std::size_t pool_capacity_left(std::size_t node_size) const noexcept
278 {
279 FOONATHAN_MEMORY_ASSERT_MSG(node_size <= max_node_size(), "node_size too big");
280 return pools_.get(node_size).capacity();
281 }
282
283 /// \returns The amount of memory available in the arena not inside the free lists.
284 /// This is the number of bytes that can be inserted into the free lists
285 /// without requesting more memory from the \concept{concept_blockallocator,BlockAllocator}.
286 /// \note Array allocations may lead to a growth even if the capacity is big enough.
287 std::size_t capacity_left() const noexcept
288 {
289 return std::size_t(block_end() - stack_.top());
290 }
291
292             /// \returns The size of the next memory block once \ref capacity_left() is exhausted and the arena grows.
293 /// This is the amount of memory that can be distributed in the pools.
294 /// \note If the `PoolType` is \ref small_node_pool, the exact usable memory is lower than that.
295 std::size_t next_capacity() const noexcept
296 {
297 return arena_.next_block_size();
298 }
299
300 /// \returns A reference to the \concept{concept_blockallocator,BlockAllocator} used for managing the arena.
301 /// \requires It is undefined behavior to move this allocator out into another object.
302 allocator_type& get_allocator() noexcept
303 {
304 return arena_.get_allocator();
305 }
306
307 private:
308 allocator_info info() const noexcept
309 {
310 return {FOONATHAN_MEMORY_LOG_PREFIX "::memory_pool_collection", this};
311 }
312
313 std::size_t def_capacity() const noexcept
314 {
315 return arena_.next_block_size() / pools_.size();
316 }
317
318 detail::fixed_memory_stack allocate_block()
319 {
320 return detail::fixed_memory_stack(arena_.allocate_block().memory);
321 }
322
323 const char* block_end() const noexcept
324 {
325 auto block = arena_.current_block();
326 return static_cast<const char*>(block.memory) + block.size;
327 }
328
329 bool insert_rest(typename pool_type::type& pool) noexcept
330 {
331 if (auto remaining = std::size_t(block_end() - stack_.top()))
332 {
333 auto offset = detail::align_offset(stack_.top(), detail::max_alignment);
334 if (offset < remaining)
335 {
336 detail::debug_fill(stack_.top(), offset, debug_magic::alignment_memory);
337 pool.insert(stack_.top() + offset, remaining - offset);
338 return true;
339 }
340 }
341
342 return false;
343 }
344
345 void try_reserve_memory(typename pool_type::type& pool, std::size_t capacity) noexcept
346 {
347 auto mem = stack_.allocate(block_end(), capacity, detail::max_alignment);
348 if (!mem)
349 insert_rest(pool);
350 else
351 pool.insert(mem, capacity);
352 }
353
354 memory_block reserve_memory(typename pool_type::type& pool, std::size_t capacity)
355 {
356 auto mem = stack_.allocate(block_end(), capacity, detail::max_alignment);
357 if (!mem)
358 {
359 insert_rest(pool);
360 // get new block
361 stack_ = allocate_block();
362
363 // allocate ensuring alignment
364 mem = stack_.allocate(block_end(), capacity, detail::max_alignment);
365 FOONATHAN_MEMORY_ASSERT(mem);
366 }
367 return {mem, capacity};
368 }
369
370 memory_arena<allocator_type, false> arena_;
371 detail::fixed_memory_stack stack_;
372 free_list_array pools_;
373
374 friend allocator_traits<memory_pool_collection>;
375 };
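        // Usage sketch (illustrative sizes): one pool per power of two up to 256 bytes.
        //
        //   memory_pool_collection<node_pool, log2_buckets> pools(256u, 4096u);
        //   void* a = pools.allocate_node(32u); // taken from the 32-byte bucket
        //   void* b = pools.allocate_node(40u); // rounded up to the 64-byte bucket
        //   pools.deallocate_node(a, 32u);
        //   pools.deallocate_node(b, 40u);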
376
377 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
378 extern template class memory_pool_collection<node_pool, identity_buckets>;
379 extern template class memory_pool_collection<array_pool, identity_buckets>;
380 extern template class memory_pool_collection<small_node_pool, identity_buckets>;
381
382 extern template class memory_pool_collection<node_pool, log2_buckets>;
383 extern template class memory_pool_collection<array_pool, log2_buckets>;
384 extern template class memory_pool_collection<small_node_pool, log2_buckets>;
385 #endif
386
387 /// An alias for \ref memory_pool_collection using the \ref identity_buckets policy
388 /// and a \c PoolType defaulting to \ref node_pool.
389 /// \ingroup allocator
390 template <class PoolType = node_pool, class ImplAllocator = default_allocator>
391 FOONATHAN_ALIAS_TEMPLATE(bucket_allocator,
392 memory_pool_collection<PoolType, identity_buckets, ImplAllocator>);
393
394 template <class Allocator>
395 class allocator_traits;
396
397 /// Specialization of the \ref allocator_traits for \ref memory_pool_collection classes.
398 /// \note It is not allowed to mix calls through the specialization and through the member functions,
399 /// i.e. \ref memory_pool_collection::allocate_node() and this \c allocate_node().
400 /// \ingroup allocator
401 template <class Pool, class BucketDist, class RawAllocator>
402 class allocator_traits<memory_pool_collection<Pool, BucketDist, RawAllocator>>
403 {
404 public:
405 using allocator_type = memory_pool_collection<Pool, BucketDist, RawAllocator>;
406 using is_stateful = std::true_type;
407
408 /// \returns The result of \ref memory_pool_collection::allocate_node().
409 /// \throws Anything thrown by the pool allocation function
410 /// or a \ref bad_allocation_size exception if \c size / \c alignment exceeds \ref max_node_size() / the suitable alignment value,
411 /// i.e. the node is over-aligned.
412 static void* allocate_node(allocator_type& state, std::size_t size,
413 std::size_t alignment)
414 {
415 // node already checked
416 detail::check_allocation_size<bad_alignment>(
417 alignment, [&] { return detail::alignment_for(size); }, state.info());
418 auto mem = state.allocate_node(size);
419 state.on_allocate(size);
420 return mem;
421 }
422
423 /// \returns The result of \ref memory_pool_collection::allocate_array().
424 /// \throws Anything thrown by the pool allocation function or a \ref bad_allocation_size exception.
425 /// \requires The \ref memory_pool_collection has to support array allocations.
426 static void* allocate_array(allocator_type& state, std::size_t count, std::size_t size,
427 std::size_t alignment)
428 {
429 // node and array already checked
430 detail::check_allocation_size<bad_alignment>(
431 alignment, [&] { return detail::alignment_for(size); }, state.info());
432 auto mem = state.allocate_array(count, size);
433 state.on_allocate(count * size);
434 return mem;
435 }
436
437 /// \effects Calls \ref memory_pool_collection::deallocate_node().
438 static void deallocate_node(allocator_type& state, void* node, std::size_t size,
439 std::size_t) noexcept
440 {
441 state.deallocate_node(node, size);
442 state.on_deallocate(size);
443 }
444
445 /// \effects Calls \ref memory_pool_collection::deallocate_array().
446 /// \requires The \ref memory_pool_collection has to support array allocations.
447 static void deallocate_array(allocator_type& state, void* array, std::size_t count,
448 std::size_t size, std::size_t) noexcept
449 {
450 state.deallocate_array(array, count, size);
451 state.on_deallocate(count * size);
452 }
453
454 /// \returns The maximum size of each node which is \ref memory_pool_collection::max_node_size().
455 static std::size_t max_node_size(const allocator_type& state) noexcept
456 {
457 return state.max_node_size();
458 }
459
460             /// \returns An upper bound on the maximum array size which is \ref memory_pool_collection::next_capacity().
461 static std::size_t max_array_size(const allocator_type& state) noexcept
462 {
463 return state.next_capacity();
464 }
465
466             /// \returns Just \c alignof(std::max_align_t), since the actual maximum alignment depends on the node size;
467             /// the nodes must not be over-aligned.
468 static std::size_t max_alignment(const allocator_type&) noexcept
469 {
470 return detail::max_alignment;
471 }
472 };
473
474 /// Specialization of the \ref composable_allocator_traits for \ref memory_pool_collection classes.
475 /// \ingroup allocator
476 template <class Pool, class BucketDist, class RawAllocator>
477 class composable_allocator_traits<memory_pool_collection<Pool, BucketDist, RawAllocator>>
478 {
479 using traits = allocator_traits<memory_pool_collection<Pool, BucketDist, RawAllocator>>;
480
481 public:
482 using allocator_type = memory_pool_collection<Pool, BucketDist, RawAllocator>;
483
484 /// \returns The result of \ref memory_pool_collection::try_allocate_node()
485 /// or `nullptr` if the allocation size was too big.
486 static void* try_allocate_node(allocator_type& state, std::size_t size,
487 std::size_t alignment) noexcept
488 {
489 if (alignment > traits::max_alignment(state))
490 return nullptr;
491 return state.try_allocate_node(size);
492 }
493
494 /// \returns The result of \ref memory_pool_collection::try_allocate_array()
495 /// or `nullptr` if the allocation size was too big.
496 static void* try_allocate_array(allocator_type& state, std::size_t count,
497 std::size_t size, std::size_t alignment) noexcept
498 {
499 if (count * size > traits::max_array_size(state)
500 || alignment > traits::max_alignment(state))
501 return nullptr;
502 return state.try_allocate_array(count, size);
503 }
504
505 /// \effects Just forwards to \ref memory_pool_collection::try_deallocate_node().
506 /// \returns Whether the deallocation was successful.
507 static bool try_deallocate_node(allocator_type& state, void* node, std::size_t size,
508 std::size_t alignment) noexcept
509 {
510 if (alignment > traits::max_alignment(state))
511 return false;
512 return state.try_deallocate_node(node, size);
513 }
514
515 /// \effects Just forwards to \ref memory_pool_collection::try_deallocate_array().
516 /// \returns Whether the deallocation was successful.
517 static bool try_deallocate_array(allocator_type& state, void* array, std::size_t count,
518 std::size_t size, std::size_t alignment) noexcept
519 {
520 if (count * size > traits::max_array_size(state)
521 || alignment > traits::max_alignment(state))
522 return false;
523 return state.try_deallocate_array(array, count, size);
524 }
525 };
526
527 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
528 extern template class allocator_traits<memory_pool_collection<node_pool, identity_buckets>>;
529 extern template class allocator_traits<
530 memory_pool_collection<array_pool, identity_buckets>>;
531 extern template class allocator_traits<
532 memory_pool_collection<small_node_pool, identity_buckets>>;
533
534 extern template class allocator_traits<memory_pool_collection<node_pool, log2_buckets>>;
535 extern template class allocator_traits<memory_pool_collection<array_pool, log2_buckets>>;
536 extern template class allocator_traits<
537 memory_pool_collection<small_node_pool, log2_buckets>>;
538
539 extern template class composable_allocator_traits<
540 memory_pool_collection<node_pool, identity_buckets>>;
541 extern template class composable_allocator_traits<
542 memory_pool_collection<array_pool, identity_buckets>>;
543 extern template class composable_allocator_traits<
544 memory_pool_collection<small_node_pool, identity_buckets>>;
545
546 extern template class composable_allocator_traits<
547 memory_pool_collection<node_pool, log2_buckets>>;
548 extern template class composable_allocator_traits<
549 memory_pool_collection<array_pool, log2_buckets>>;
550 extern template class composable_allocator_traits<
551 memory_pool_collection<small_node_pool, log2_buckets>>;
552 #endif
553 } // namespace memory
554 } // namespace foonathan
555
556 #endif // FOONATHAN_MEMORY_MEMORY_POOL_COLLECTION_HPP_INCLUDED
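// Usage sketch (editor's addition): driving a memory_pool_collection through
// the allocator_traits specialization above. The (max_node_size, block_size)
// constructor arguments shown here are an assumption based on the class
// documentation; adjust them to the actual signature if it differs.
#include <foonathan/memory/memory_pool_collection.hpp>

void pool_collection_traits_sketch()
{
    using namespace foonathan::memory;
    // pools for nodes up to 64 bytes, 4KiB arena blocks (example values)
    memory_pool_collection<node_pool, identity_buckets> pools(64, 4096);

    using traits = allocator_traits<decltype(pools)>;
    void* node = traits::allocate_node(pools, 32, 8);
    traits::deallocate_node(pools, node, 32, 8);
}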
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_MEMORY_POOL_TYPE_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_MEMORY_POOL_TYPE_HPP_INCLUDED
6
7 /// \file
8 /// The \c PoolType tag types.
9
10 #include <type_traits>
11
12 #include "detail/free_list.hpp"
13 #include "detail/small_free_list.hpp"
14 #include "config.hpp"
15
16 namespace foonathan
17 {
18 namespace memory
19 {
20 /// Tag type defining a memory pool optimized for nodes.
21 /// It does not support array allocations particularly well and may trigger a growth even if there is enough memory.
22 /// But it is the fastest pool type.
23 /// \ingroup allocator
24 struct node_pool : FOONATHAN_EBO(std::true_type)
25 {
26 using type = detail::node_free_memory_list;
27 };
28
29 /// Tag type defining a memory pool optimized for arrays.
30 /// It keeps the nodes ordered inside the free list and searches the list for an appropriate memory block.
31 /// Array allocations are still pretty slow; if the array gets big enough, it can be slower than \c new.
32 /// Node allocations are still fast, unless nodes have been deallocated in random order.
33 /// \note Use this tag type only if you really need to have a memory pool!
34 /// \ingroup allocator
35 struct array_pool : FOONATHAN_EBO(std::true_type)
36 {
37 using type = detail::array_free_memory_list;
38 };
39
40 /// Tag type defining a memory pool optimized for small nodes.
41 /// A regular free list is intrusive and thus requires that each node is at least as big as a pointer.
42 /// This tag type does not have that requirement and thus allows zero-memory-overhead allocations of small nodes.
43 /// It is a little bit slower than \ref node_pool and does not support arrays.
44 /// \ingroup allocator
45 struct small_node_pool : FOONATHAN_EBO(std::false_type)
46 {
47 using type = detail::small_free_memory_list;
48 };
49 } // namespace memory
50 } // namespace foonathan
51
52 #endif // FOONATHAN_MEMORY_MEMORY_POOL_TYPE_HPP_INCLUDED
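// Usage sketch (editor's addition): the tag types above select the free-list
// implementation of a memory_pool. This assumes the memory_pool<PoolType>
// template from memory_pool.hpp, which takes the tag as its first parameter.
#include <foonathan/memory/memory_pool.hpp>

void pool_tag_sketch()
{
    using namespace foonathan::memory;
    memory_pool<node_pool> nodes(16, 4096); // 16-byte nodes, 4KiB blocks
    void* n = nodes.allocate_node();        // fast node allocation
    nodes.deallocate_node(n);

    memory_pool<array_pool> arrays(16, 4096); // keeps the free list ordered
    void* a = arrays.allocate_array(8);       // 8 contiguous nodes
    arrays.deallocate_array(a, 8);
}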
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_MEMORY_RESOURCE_ADAPTER_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_MEMORY_RESOURCE_ADAPTER_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::memory_resource_adapter and \ref foonathan::memory::memory_resource_allocator to allow usage of PMRs.
9
10 #include "detail/assert.hpp"
11 #include "detail/utility.hpp"
12 #include "config.hpp"
13 #include "allocator_traits.hpp"
14
15 #if defined(__has_include) && __has_include(<memory_resource>)
16
17 #if !defined(__GNUC__) || __cplusplus >= 201703L
18 // The memory_resource header lacks a check for C++17 on older GCC,
19 // so we have to do it for them.
20 #include <memory_resource>
21 #endif
22
23 #elif defined(__has_include) && __has_include(<experimental/memory_resource>)
24
25 #if !defined(__GNUC__) || __cplusplus >= 201402L
26 // The experimental/memory_resource header lacks a check for C++14 on older GCC,
27 // so we have to do it for them.
28 #include <experimental/memory_resource>
29 #endif
30
31 #endif
32
33 #if defined(__cpp_lib_memory_resource)
34
35 // We use std::pmr::memory_resource.
36 namespace foonathan_memory_pmr = std::pmr;
37
38 #elif defined(__cpp_lib_experimental_memory_resources)
39
40 // We use std::experimental::pmr::memory_resource.
41 namespace foonathan_memory_pmr = std::experimental::pmr;
42
43 #else
44
45 // We use our own implementation.
46 namespace foonathan_memory_pmr
47 {
48 // see N3916 for documentation
49 class memory_resource
50 {
51 static const std::size_t max_alignment = alignof(std::max_align_t);
52
53 public:
54 virtual ~memory_resource() noexcept {}
55 void* allocate(std::size_t bytes, std::size_t alignment = max_alignment)
56 {
57 return do_allocate(bytes, alignment);
58 }
59 void deallocate(void* p, std::size_t bytes, std::size_t alignment = max_alignment)
60 {
61 do_deallocate(p, bytes, alignment);
62 }
63 bool is_equal(const memory_resource& other) const noexcept
64 {
65 return do_is_equal(other);
66 }
67
68 protected:
69 virtual void* do_allocate(std::size_t bytes, std::size_t alignment) = 0;
70 virtual void do_deallocate(void* p, std::size_t bytes, std::size_t alignment) = 0;
71 virtual bool do_is_equal(const memory_resource& other) const noexcept = 0;
72 };
73 inline bool operator==(const memory_resource& a, const memory_resource& b) noexcept
74 {
75 return &a == &b || a.is_equal(b);
76 }
77 inline bool operator!=(const memory_resource& a, const memory_resource& b) noexcept
78 {
79 return !(a == b);
80 }
81 } // namespace foonathan_memory_pmr
82
83 #endif
84
85 namespace foonathan
86 {
87 namespace memory
88 {
89 /// The \c memory_resource abstract base class used in the implementation.
90 /// \ingroup adapter
91 FOONATHAN_ALIAS_TEMPLATE(memory_resource, foonathan_memory_pmr::memory_resource);
92
93 /// Wraps a \concept{concept_rawallocator,RawAllocator} and makes it a \ref memory_resource.
94 /// \ingroup adapter
95 template <class RawAllocator>
96 class memory_resource_adapter
97 : public memory_resource,
98 FOONATHAN_EBO(allocator_traits<RawAllocator>::allocator_type)
99 {
100 public:
101 using allocator_type = typename allocator_traits<RawAllocator>::allocator_type;
102
103 /// \effects Creates the resource by moving in the allocator.
104 memory_resource_adapter(allocator_type&& other) noexcept
105 : allocator_type(detail::move(other))
106 {
107 }
108
109 /// @{
110 /// \returns A reference to the wrapped allocator.
111 allocator_type& get_allocator() noexcept
112 {
113 return *this;
114 }
115
116 const allocator_type& get_allocator() const noexcept
117 {
118 return *this;
119 }
120 /// @}
121
122 protected:
123 using traits_type = allocator_traits<RawAllocator>;
124
125 /// \effects Allocates raw memory with given size and alignment.
126 /// It forwards to \c allocate_node() or \c allocate_array() depending on the size.
127 /// \returns The new memory as returned by the \concept{concept_rawallocator,RawAllocator}.
128 /// \throws Anything thrown by the allocation function.
129 void* do_allocate(std::size_t bytes, std::size_t alignment) override
130 {
131 auto max = traits_type::max_node_size(*this);
132 if (bytes <= max)
133 return traits_type::allocate_node(*this, bytes, alignment);
134 auto div = bytes / max;
135 auto mod = bytes % max;
136 auto n = div + (mod != 0);
137 return traits_type::allocate_array(*this, n, max, alignment);
138 }
139
140 /// \effects Deallocates memory previously allocated by \ref do_allocate.
141 /// It forwards to \c deallocate_node() or \c deallocate_array() depending on the size.
142 /// \throws Nothing.
143 void do_deallocate(void* p, std::size_t bytes, std::size_t alignment) override
144 {
145 auto max = traits_type::max_node_size(*this);
146 if (bytes <= max)
147 traits_type::deallocate_node(*this, p, bytes, alignment);
148 else
149 {
150 auto div = bytes / max;
151 auto mod = bytes % max;
152 auto n = div + (mod != 0);
153 traits_type::deallocate_array(*this, p, n, max, alignment);
154 }
155 }
156
157 /// \returns Whether or not \c *this is equal to \c other
158 /// by comparing the addresses.
159 bool do_is_equal(const memory_resource& other) const noexcept override
160 {
161 return this == &other;
162 }
163 };
164
165 /// Wraps a \ref memory_resource and makes it a \concept{concept_rawallocator,RawAllocator}.
166 /// \ingroup adapter
167 class memory_resource_allocator
168 {
169 public:
170 /// \effects Creates it by giving it a pointer to the \ref memory_resource.
171 /// \requires \c ptr must not be \c nullptr.
172 memory_resource_allocator(memory_resource* ptr) noexcept : ptr_(ptr)
173 {
174 FOONATHAN_MEMORY_ASSERT(ptr);
175 }
176
177 /// \effects Allocates a node by forwarding to the \c allocate() function.
178 /// \returns The node as returned by the \ref memory_resource.
179 /// \throws Anything thrown by the \c allocate() function.
180 void* allocate_node(std::size_t size, std::size_t alignment)
181 {
182 return ptr_->allocate(size, alignment);
183 }
184
185 /// \effects Deallocates a node by forwarding to the \c deallocate() function.
186 void deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
187 {
188 ptr_->deallocate(ptr, size, alignment);
189 }
190
191 /// \returns The maximum alignment, which is the maximum value of type \c std::size_t.
192 std::size_t max_alignment() const noexcept
193 {
194 return std::size_t(-1);
195 }
196
197 /// \returns A pointer to the used \ref memory_resource; this is never \c nullptr.
198 memory_resource* resource() const noexcept
199 {
200 return ptr_;
201 }
202
203 private:
204 memory_resource* ptr_;
205 };
206
207 /// @{
208 /// \returns Whether `lhs` and `rhs` share the same resource.
209 /// \relates memory_resource_allocator
210 inline bool operator==(const memory_resource_allocator& lhs,
211 const memory_resource_allocator& rhs) noexcept
212 {
213 return lhs.resource() == rhs.resource();
214 }
215
216 inline bool operator!=(const memory_resource_allocator& lhs,
217 const memory_resource_allocator& rhs) noexcept
218 {
219 return !(lhs == rhs);
220 }
221 /// @}
222
223 #if !defined(DOXYGEN)
224 template <class RawAllocator>
225 struct is_shared_allocator;
226 #endif
227
228 /// Specialization of \ref is_shared_allocator to mark \ref memory_resource_allocator as shared.
229 /// This allows using it as \ref allocator_reference directly.
230 /// \ingroup adapter
231 template <>
232 struct is_shared_allocator<memory_resource_allocator> : std::true_type
233 {
234 };
235 } // namespace memory
236 } // namespace foonathan
237
238 #endif // FOONATHAN_MEMORY_MEMORY_RESOURCE_ADAPTER_HPP_INCLUDED
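// Usage sketch (editor's addition): wrapping a RawAllocator as a pmr-style
// memory_resource and going back again. new_allocator (new_allocator.hpp) is
// used here only as an example RawAllocator; any other one works as well.
#include <foonathan/memory/memory_resource_adapter.hpp>
#include <foonathan/memory/new_allocator.hpp>

void memory_resource_sketch()
{
    using namespace foonathan::memory;
    memory_resource_adapter<new_allocator> resource{new_allocator{}};

    // pmr-style interface; sizes above max_node_size() are split into arrays
    void* p = resource.allocate(128, 8);
    resource.deallocate(p, 128, 8);

    // back to the RawAllocator world via memory_resource_allocator
    memory_resource_allocator alloc(&resource);
    void* q = alloc.allocate_node(64, 8);
    alloc.deallocate_node(q, 64, 8);
}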
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_MEMORY_STACK_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_MEMORY_STACK_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::memory_stack and its \ref foonathan::memory::allocator_traits specialization.
9
10 // Inform that foonathan::memory::memory_stack::min_block_size API is available
11 #define FOONATHAN_MEMORY_MEMORY_STACK_HAS_MIN_BLOCK_SIZE
12
13 #include <cstdint>
14 #include <type_traits>
15
16 #include "detail/assert.hpp"
17 #include "detail/memory_stack.hpp"
18 #include "config.hpp"
19 #include "error.hpp"
20 #include "memory_arena.hpp"
21
22 namespace foonathan
23 {
24 namespace memory
25 {
26 #if !defined(DOXYGEN)
27 template <class Impl>
28 class memory_stack;
29 #endif
30
31 namespace detail
32 {
33 class stack_marker
34 {
35 std::size_t index;
36 char* top;
37 const char* end;
38
39 stack_marker(std::size_t i, const detail::fixed_memory_stack& s,
40 const char* e) noexcept
41 : index(i), top(s.top()), end(e)
42 {
43 }
44
45 friend bool operator==(const stack_marker& lhs, const stack_marker& rhs) noexcept
46 {
47 if (lhs.index != rhs.index)
48 return false;
49 FOONATHAN_MEMORY_ASSERT_MSG(lhs.end == rhs.end, "you must not compare two "
50 "stack markers from different "
51 "stacks");
52 return lhs.top == rhs.top;
53 }
54
55 friend bool operator!=(const stack_marker& lhs, const stack_marker& rhs) noexcept
56 {
57 return !(rhs == lhs);
58 }
59
60 friend bool operator<(const stack_marker& lhs, const stack_marker& rhs) noexcept
61 {
62 if (lhs.index != rhs.index)
63 return lhs.index < rhs.index;
64 FOONATHAN_MEMORY_ASSERT_MSG(lhs.end == rhs.end, "you must not compare two "
65 "stack markers from different "
66 "stacks");
67 return lhs.top < rhs.top;
68 }
69
70 friend bool operator>(const stack_marker& lhs, const stack_marker& rhs) noexcept
71 {
72 return rhs < lhs;
73 }
74
75 friend bool operator<=(const stack_marker& lhs, const stack_marker& rhs) noexcept
76 {
77 return !(rhs < lhs);
78 }
79
80 friend bool operator>=(const stack_marker& lhs, const stack_marker& rhs) noexcept
81 {
82 return !(lhs < rhs);
83 }
84
85 template <class Impl>
86 friend class memory::memory_stack;
87 };
88
89 struct memory_stack_leak_handler
90 {
91 void operator()(std::ptrdiff_t amount);
92 };
93 } // namespace detail
94
95 /// A stateful \concept{concept_rawallocator,RawAllocator} that provides stack-like (LIFO) allocations.
96 /// It uses a \ref memory_arena with a given \c BlockOrRawAllocator defaulting to \ref growing_block_allocator to allocate huge blocks
97 /// and saves a marker to the current top.
98 /// Allocation simply moves this marker by the appropriate number of bytes and returns the pointer at the old marker position;
99 /// deallocation is not directly supported, only unwinding the marker to a previously queried position.
100 /// \ingroup allocator
101 template <class BlockOrRawAllocator = default_allocator>
102 class memory_stack
103 : FOONATHAN_EBO(detail::default_leak_checker<detail::memory_stack_leak_handler>)
104 {
105 public:
106 using allocator_type = make_block_allocator_t<BlockOrRawAllocator>;
107
108 /// \returns The minimum block size required for a stack containing the given amount of memory.
109 /// If a stack is created with the result of `min_block_size(n)`, the resulting capacity will be exactly `n`.
110 /// \requires `byte_size` must be a positive number.
111 /// \note Due to debug fence sizes, the actual amount of usable memory can vary.
112 /// However, this is impossible to compute without knowing the exact allocation pattern in advance,
113 /// so this is just a rough estimate.
114 static constexpr std::size_t min_block_size(std::size_t byte_size) noexcept
115 {
116 return detail::memory_block_stack::implementation_offset() + byte_size;
117 }
118
119 /// \effects Creates it with a given initial block size and other constructor arguments for the \concept{concept_blockallocator,BlockAllocator}.
120 /// It will allocate the first block and set the top to its beginning.
121 template <typename... Args>
122 explicit memory_stack(std::size_t block_size, Args&&... args)
123 : arena_(block_size, detail::forward<Args>(args)...),
124 stack_(arena_.allocate_block().memory)
125 {
126 }
127
128 /// \effects Allocates a memory block of given size and alignment.
129 /// It simply moves the top marker.
130 /// If there is not enough space on the current memory block,
131 /// a new one will be allocated by the \concept{concept_blockallocator,BlockAllocator} or taken from a cache
132 /// and used for the allocation.
133 /// \returns A \concept{concept_node,node} with given size and alignment.
134 /// \throws Anything thrown by the \concept{concept_blockallocator,BlockAllocator} on growth
135 /// or \ref bad_allocation_size if \c size is too big.
136 /// \requires \c size and \c alignment must be valid.
137 void* allocate(std::size_t size, std::size_t alignment)
138 {
139 auto fence = detail::debug_fence_size;
140 auto offset = detail::align_offset(stack_.top() + fence, alignment);
141
142 if (!stack_.top()
143 || fence + offset + size + fence > std::size_t(block_end() - stack_.top()))
144 {
145 // need to grow
146 auto block = arena_.allocate_block();
147 stack_ = detail::fixed_memory_stack(block.memory);
148
149 // new alignment required for over-aligned types
150 offset = detail::align_offset(stack_.top() + fence, alignment);
151
152 auto needed = fence + offset + size + fence;
153 detail::check_allocation_size<bad_allocation_size>(needed, block.size, info());
154 }
155
156 return stack_.allocate_unchecked(size, offset);
157 }
158
159 /// \effects Allocates a memory block of given size and alignment,
160 /// similar to \ref allocate().
161 /// But it does not attempt a growth if the arena is empty.
162 /// \returns A \concept{concept_node,node} with given size and alignment
163 /// or `nullptr` if there wasn't enough memory available.
164 void* try_allocate(std::size_t size, std::size_t alignment) noexcept
165 {
166 return stack_.allocate(block_end(), size, alignment);
167 }
168
169 /// The marker type that is used for unwinding.
170 /// The exact type is implementation defined,
171 /// it is only required that it is efficiently copyable
172 /// and has all the comparison operators defined for two markers on the same stack.
173 /// Two markers are equal if they are copies or created from two `top()` calls without a call to `unwind()` or `allocate()` in between.
174 /// A marker `a` is less than marker `b` if, after `a` was obtained, there were one or more calls to `allocate()` and no call to `unwind()`.
175 using marker = FOONATHAN_IMPL_DEFINED(detail::stack_marker);
176
177 /// \returns A marker to the current top of the stack.
178 marker top() const noexcept
179 {
180 return {arena_.size() - 1, stack_, block_end()};
181 }
182
183 /// \effects Unwinds the stack to a certain marker position.
184 /// This sets the top pointer of the stack to the position described by the marker
185 /// and has the effect of deallocating all memory allocated since the marker was obtained.
186 /// If any memory blocks are unused after the operation,
187 /// they are not deallocated but put in a cache for later use,
188 /// call \ref shrink_to_fit() to actually deallocate them.
189 /// \requires The marker must point to memory that is still in use and has been the whole time,
190 /// i.e. it must have pointed below the top at all times.
191 void unwind(marker m) noexcept
192 {
193 FOONATHAN_MEMORY_ASSERT(m <= top());
194 detail::debug_check_pointer([&] { return m.index <= arena_.size() - 1; }, info(),
195 m.top);
196
197 if (std::size_t to_deallocate = (arena_.size() - 1) - m.index) // different index
198 {
199 arena_.deallocate_block();
200 for (std::size_t i = 1; i != to_deallocate; ++i)
201 arena_.deallocate_block();
202
203 detail::debug_check_pointer(
204 [&] {
205 auto cur = arena_.current_block();
206 return m.end == static_cast<char*>(cur.memory) + cur.size;
207 },
208 info(), m.top);
209
210 // mark memory from new top to end of the block as freed
211 detail::debug_fill_free(m.top, std::size_t(m.end - m.top), 0);
212 stack_ = detail::fixed_memory_stack(m.top);
213 }
214 else // same index
215 {
216 detail::debug_check_pointer([&] { return stack_.top() >= m.top; }, info(),
217 m.top);
218 stack_.unwind(m.top);
219 }
220 }
221
222 /// \effects \ref unwind() does not actually do any deallocation of blocks on the \concept{concept_blockallocator,BlockAllocator},
223 /// unused memory is stored in a cache for later reuse.
224 /// This function clears that cache.
225 void shrink_to_fit() noexcept
226 {
227 arena_.shrink_to_fit();
228 }
229
230 /// \returns The amount of memory remaining in the current block.
231 /// This is the number of bytes that are available for allocation
232 /// before the cache or \concept{concept_blockallocator,BlockAllocator} needs to be used.
233 std::size_t capacity_left() const noexcept
234 {
235 return std::size_t(block_end() - stack_.top());
236 }
237
238 /// \returns The size of the next memory block after the current block is exhausted and the arena grows.
239 /// This function just forwards to the \ref memory_arena.
240 /// \note All of it is available for the stack to use, but due to fences and alignment buffers,
241 /// this may not be the exact amount of memory usable for the user.
242 std::size_t next_capacity() const noexcept
243 {
244 return arena_.next_block_size();
245 }
246
247 /// \returns A reference to the \concept{concept_blockallocator,BlockAllocator} used for managing the arena.
248 /// \requires It is undefined behavior to move this allocator out into another object.
249 allocator_type& get_allocator() noexcept
250 {
251 return arena_.get_allocator();
252 }
253
254 private:
255 allocator_info info() const noexcept
256 {
257 return {FOONATHAN_MEMORY_LOG_PREFIX "::memory_stack", this};
258 }
259
260 const char* block_end() const noexcept
261 {
262 auto block = arena_.current_block();
263 return static_cast<const char*>(block.memory) + block.size;
264 }
265
266 memory_arena<allocator_type> arena_;
267 detail::fixed_memory_stack stack_;
268
269 friend allocator_traits<memory_stack<BlockOrRawAllocator>>;
270 friend composable_allocator_traits<memory_stack<BlockOrRawAllocator>>;
271 };
272
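// Usage sketch (editor's addition): typical marker-based use of memory_stack.
#include <foonathan/memory/memory_stack.hpp>

void memory_stack_sketch()
{
    using namespace foonathan::memory;
    // a stack whose first block holds exactly 4096 bytes
    memory_stack<> stack(memory_stack<>::min_block_size(4096));

    auto m  = stack.top();             // remember the current top
    void* a = stack.allocate(128, 8);  // bump allocations
    void* b = stack.allocate(256, 16);
    // ... use a and b ...
    (void)a, (void)b;
    stack.unwind(m);                   // releases both allocations at once
}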
273 /// Simple utility that automatically unwinds a `Stack` to a previously saved location.
274 /// A `Stack` is anything that provides a `marker`, a `top()` function returning a `marker`
275 /// and an `unwind()` function to unwind to a `marker`,
276 /// like a \ref foonathan::memory::memory_stack.
277 /// \ingroup allocator
278 template <class Stack = memory_stack<>>
279 class memory_stack_raii_unwind
280 {
281 public:
282 using stack_type = Stack;
283 using marker_type = typename stack_type::marker;
284
285 /// \effects Same as `memory_stack_raii_unwind(stack, stack.top())`.
286 explicit memory_stack_raii_unwind(stack_type& stack) noexcept
287 : memory_stack_raii_unwind(stack, stack.top())
288 {
289 }
290
291 /// \effects Creates the unwinder by giving it the stack and the marker.
292 /// \requires The stack must live longer than this object.
293 memory_stack_raii_unwind(stack_type& stack, marker_type marker) noexcept
294 : marker_(marker), stack_(&stack)
295 {
296 }
297
298 /// \effects Move constructs the unwinder by taking the saved position from `other`.
299 /// `other.will_unwind()` will return `false` after it.
300 memory_stack_raii_unwind(memory_stack_raii_unwind&& other) noexcept
301 : marker_(other.marker_), stack_(other.stack_)
302 {
303 other.stack_ = nullptr;
304 }
305
306 /// \effects Unwinds to the previously saved location,
307 /// if there is any, by calling `unwind()`.
308 ~memory_stack_raii_unwind() noexcept
309 {
310 if (stack_)
311 stack_->unwind(marker_);
312 }
313
314 /// \effects Move assigns the unwinder by taking the saved position from `other`.
315 /// `other.will_unwind()` will return `false` after it.
316 memory_stack_raii_unwind& operator=(memory_stack_raii_unwind&& other) noexcept
317 {
318 if (stack_)
319 stack_->unwind(marker_);
320
321 marker_ = other.marker_;
322 stack_ = other.stack_;
323
324 other.stack_ = nullptr;
325
326 return *this;
327 }
328
329 /// \effects Removes the location without unwinding it.
330 /// `will_unwind()` will return `false`.
331 void release() noexcept
332 {
333 stack_ = nullptr;
334 }
335
336 /// \effects Unwinds to the saved location explicitly.
337 /// \requires `will_unwind()` must return `true`.
338 void unwind() noexcept
339 {
340 FOONATHAN_MEMORY_ASSERT(will_unwind());
341 stack_->unwind(marker_);
342 }
343
344 /// \returns Whether or not the unwinder will actually unwind.
345 /// \note It will not unwind if it is in the moved-from state.
346 bool will_unwind() const noexcept
347 {
348 return stack_ != nullptr;
349 }
350
351 /// \returns The saved marker, if there is any.
352 /// \requires `will_unwind()` must return `true`.
353 marker_type get_marker() const noexcept
354 {
355 FOONATHAN_MEMORY_ASSERT(will_unwind());
356 return marker_;
357 }
358
359 /// \returns The stack it will unwind.
360 /// \requires `will_unwind()` must return `true`.
361 stack_type& get_stack() const noexcept
362 {
363 FOONATHAN_MEMORY_ASSERT(will_unwind());
364 return *stack_;
365 }
366
367 private:
368 marker_type marker_;
369 stack_type* stack_;
370 };
371
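// Usage sketch (editor's addition): memory_stack_raii_unwind guards a scope
// so temporary allocations are reclaimed on every exit path.
#include <foonathan/memory/memory_stack.hpp>

bool process(foonathan::memory::memory_stack<>& stack, bool commit)
{
    foonathan::memory::memory_stack_raii_unwind<> unwinder(stack); // saves stack.top()

    void* scratch = stack.allocate(1024, 8);
    (void)scratch; // ... fill scratch ...
    if (!commit)
        return false; // destructor unwinds, scratch is reclaimed

    unwinder.release(); // keep the allocation alive past this scope
    return true;
}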
372 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
373 extern template class memory_stack<>;
374 extern template class memory_stack_raii_unwind<memory_stack<>>;
375 #endif
376
377 /// Specialization of the \ref allocator_traits for \ref memory_stack classes.
378 /// \note It is not allowed to mix calls through the specialization and through the member functions,
379 /// i.e. \ref memory_stack::allocate() and this \c allocate_node().
380 /// \ingroup allocator
381 template <class BlockAllocator>
382 class allocator_traits<memory_stack<BlockAllocator>>
383 {
384 public:
385 using allocator_type = memory_stack<BlockAllocator>;
386 using is_stateful = std::true_type;
387
388 /// \returns The result of \ref memory_stack::allocate().
389 static void* allocate_node(allocator_type& state, std::size_t size,
390 std::size_t alignment)
391 {
392 auto mem = state.allocate(size, alignment);
393 state.on_allocate(size);
394 return mem;
395 }
396
397 /// \returns The result of \ref memory_stack::allocate().
398 static void* allocate_array(allocator_type& state, std::size_t count, std::size_t size,
399 std::size_t alignment)
400 {
401 return allocate_node(state, count * size, alignment);
402 }
403
404 /// @{
405 /// \effects Does nothing besides bookmarking for leak checking, if that is enabled.
406 /// Actual deallocation can only be done via \ref memory_stack::unwind().
407 static void deallocate_node(allocator_type& state, void*, std::size_t size,
408 std::size_t) noexcept
409 {
410 state.on_deallocate(size);
411 }
412
413 static void deallocate_array(allocator_type& state, void* ptr, std::size_t count,
414 std::size_t size, std::size_t alignment) noexcept
415 {
416 deallocate_node(state, ptr, count * size, alignment);
417 }
418 /// @}
419
420 /// @{
421 /// \returns The maximum size which is \ref memory_stack::next_capacity().
422 static std::size_t max_node_size(const allocator_type& state) noexcept
423 {
424 return state.next_capacity();
425 }
426
427 static std::size_t max_array_size(const allocator_type& state) noexcept
428 {
429 return state.next_capacity();
430 }
431 /// @}
432
433 /// \returns The maximum possible value since there is no alignment restriction
434 /// (except indirectly through \ref memory_stack::next_capacity()).
435 static std::size_t max_alignment(const allocator_type&) noexcept
436 {
437 return std::size_t(-1);
438 }
439 };
440
441 /// Specialization of the \ref composable_allocator_traits for \ref memory_stack classes.
442 /// \ingroup allocator
443 template <class BlockAllocator>
444 class composable_allocator_traits<memory_stack<BlockAllocator>>
445 {
446 public:
447 using allocator_type = memory_stack<BlockAllocator>;
448
449 /// \returns The result of \ref memory_stack::try_allocate().
450 static void* try_allocate_node(allocator_type& state, std::size_t size,
451 std::size_t alignment) noexcept
452 {
453 return state.try_allocate(size, alignment);
454 }
455
456 /// \returns The result of \ref memory_stack::try_allocate().
457 static void* try_allocate_array(allocator_type& state, std::size_t count,
458 std::size_t size, std::size_t alignment) noexcept
459 {
460 return state.try_allocate(count * size, alignment);
461 }
462
463 /// @{
464 /// \effects Does nothing.
465 /// \returns Whether the memory will be deallocated by \ref memory_stack::unwind().
466 static bool try_deallocate_node(allocator_type& state, void* ptr, std::size_t,
467 std::size_t) noexcept
468 {
469 return state.arena_.owns(ptr);
470 }
471
472 static bool try_deallocate_array(allocator_type& state, void* ptr, std::size_t count,
473 std::size_t size, std::size_t alignment) noexcept
474 {
475 return try_deallocate_node(state, ptr, count * size, alignment);
476 }
477 /// @}
478 };
479
480 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
481 extern template class allocator_traits<memory_stack<>>;
482 extern template class composable_allocator_traits<memory_stack<>>;
483 #endif
484 } // namespace memory
485 } // namespace foonathan
486
487 #endif // FOONATHAN_MEMORY_MEMORY_STACK_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_NAMESPACE_ALIAS_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_NAMESPACE_ALIAS_HPP_INCLUDED
6
7 /// \file
8 /// Convenient namespace alias.
9
10 /// \defgroup core Core components
11
12 /// \defgroup allocator Allocator implementations
13
14 /// \defgroup adapter Adapters and Wrappers
15
16 /// \defgroup storage Allocator storage
17
18 /// \namespace foonathan
19 /// Foonathan namespace.
20
21 /// \namespace foonathan::memory
22 /// Memory namespace.
23
24 /// \namespace foonathan::memory::literals
25 /// Literals namespace.
26
27 namespace foonathan
28 {
29 namespace memory
30 {
31 }
32 } // namespace foonathan
33
34 namespace memory = foonathan::memory;
35
36 #endif // FOONATHAN_MEMORY_NAMESPACE_ALIAS_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_NEW_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_NEW_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::new_allocator.
9
10 #include "detail/lowlevel_allocator.hpp"
11 #include "config.hpp"
12
13 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
14 #include "allocator_traits.hpp"
15 #endif
16
17 namespace foonathan
18 {
19 namespace memory
20 {
21 struct allocator_info;
22
23 namespace detail
24 {
25 struct new_allocator_impl
26 {
27 static allocator_info info() noexcept;
28
29 static void* allocate(std::size_t size, std::size_t) noexcept;
30
31 static void deallocate(void* ptr, std::size_t size, std::size_t) noexcept;
32
33 static std::size_t max_node_size() noexcept;
34 };
35
36 FOONATHAN_MEMORY_LL_ALLOCATOR_LEAK_CHECKER(new_allocator_impl,
37 new_allocator_leak_checker)
38 } // namespace detail
39
40 /// A stateless \concept{concept_rawallocator,RawAllocator} that allocates memory using (nothrow) <tt>operator new</tt>.
41 /// If the operator returns \c nullptr, it behaves like \c new and repeatedly calls the current \c std::new_handler,
42 /// but instead of throwing a \c std::bad_alloc exception, it throws \ref out_of_memory.
43 /// \ingroup allocator
44 using new_allocator =
45 FOONATHAN_IMPL_DEFINED(detail::lowlevel_allocator<detail::new_allocator_impl>);
46
47 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
48 extern template class detail::lowlevel_allocator<detail::new_allocator_impl>;
49 extern template class allocator_traits<new_allocator>;
50 #endif
51 } // namespace memory
52 } // namespace foonathan
53
54 #endif // FOONATHAN_MEMORY_NEW_ALLOCATOR_HPP_INCLUDED
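// Usage sketch (editor's addition): new_allocator is stateless, so it can be
// created on the fly wherever a RawAllocator is needed.
#include <foonathan/memory/new_allocator.hpp>

void new_allocator_sketch()
{
    foonathan::memory::new_allocator alloc;
    void* node = alloc.allocate_node(64, 8); // throws out_of_memory, never returns nullptr
    alloc.deallocate_node(node, 64, 8);
}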
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_SEGREGATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_SEGREGATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class template \ref foonathan::memory::segregator and related classes.
9
10 #include "detail/ebo_storage.hpp"
11 #include "detail/utility.hpp"
12 #include "allocator_traits.hpp"
13 #include "config.hpp"
14 #include "error.hpp"
15
16 namespace foonathan
17 {
18 namespace memory
19 {
20 /// A \concept{concept_segregatable,Segregatable} that uses its allocator for all allocations up to a maximum size.
21 /// \ingroup adapter
22 template <class RawAllocator>
23 class threshold_segregatable : FOONATHAN_EBO(allocator_traits<RawAllocator>::allocator_type)
24 {
25 public:
26 using allocator_type = typename allocator_traits<RawAllocator>::allocator_type;
27
28 /// \effects Creates it by passing the maximum size it will allocate
29 /// and the allocator it uses.
30 explicit threshold_segregatable(std::size_t max_size,
31 allocator_type alloc = allocator_type())
32 : allocator_type(detail::move(alloc)), max_size_(max_size)
33 {
34 }
35
36 /// \returns `true` if `size` is less than or equal to the maximum size,
37 /// `false` otherwise.
38 /// \note A return value of `true` means that the allocator will be used for the allocation.
39 bool use_allocate_node(std::size_t size, std::size_t) noexcept
40 {
41 return size <= max_size_;
42 }
43
44 /// \returns `true` if `count * size` is less than or equal to the maximum size,
45 /// `false` otherwise.
46 /// \note A return value of `true` means that the allocator will be used for the allocation.
47 bool use_allocate_array(std::size_t count, std::size_t size, std::size_t) noexcept
48 {
49 return count * size <= max_size_;
50 }
51
52 /// @{
53 /// \returns A reference to the allocator it owns.
54 allocator_type& get_allocator() noexcept
55 {
56 return *this;
57 }
58
59 const allocator_type& get_allocator() const noexcept
60 {
61 return *this;
62 }
63 /// @}
64
65 private:
66 std::size_t max_size_;
67 };
68
69 /// \returns A \ref threshold_segregatable created from the given parameters.
70 template <class RawAllocator>
71 threshold_segregatable<typename std::decay<RawAllocator>::type> threshold(
72 std::size_t max_size, RawAllocator&& alloc)
73 {
74 return threshold_segregatable<
75 typename std::decay<RawAllocator>::type>(max_size,
76 std::forward<RawAllocator>(alloc));
77 }
78
79 /// A composable \concept{concept_rawallocator,RawAllocator} that will always fail.
80 /// This is useful for composition or as a last resort in \ref binary_segregator.
81 /// \ingroup allocator
82 class null_allocator
83 {
84 public:
85 /// \effects Will always throw.
86 /// \throws A \ref out_of_fixed_memory exception.
87 void* allocate_node(std::size_t size, std::size_t)
88 {
89 throw out_of_fixed_memory(info(), size);
90 }
91
92 /// \requires Must not be called.
93 void deallocate_node(void*, std::size_t, std::size_t) noexcept
94 {
95 FOONATHAN_MEMORY_UNREACHABLE("cannot be called with proper values");
96 }
97
98 /// \effects Does nothing.
99 /// \returns Always returns `nullptr`.
100 void* try_allocate_node(std::size_t, std::size_t) noexcept
101 {
102 return nullptr;
103 }
104
105 /// \effects Does nothing.
106 /// \returns Always returns `false`.
107 bool try_deallocate_node(void*, std::size_t, std::size_t) noexcept
108 {
109 return false;
110 }
111
112 private:
113 allocator_info info() const noexcept
114 {
115 return {FOONATHAN_MEMORY_LOG_PREFIX "::null_allocator", this};
116 }
117 };
118
119 /// A \concept{concept_rawallocator,RawAllocator} that either uses the \concept{concept_segregatable,Segregatable} or the other `RawAllocator`.
120 /// It is a faster alternative to \ref fallback_allocator that doesn't require a composable allocator
121 /// and decides which allocator to use purely via the `Segregatable`, based on size and alignment.
122 /// \ingroup adapter
123 template <class Segregatable, class RawAllocator>
124 class binary_segregator
125 : FOONATHAN_EBO(
126 detail::ebo_storage<1, typename allocator_traits<RawAllocator>::allocator_type>)
127 {
128 using segregatable_traits = allocator_traits<typename Segregatable::allocator_type>;
129 using fallback_traits = allocator_traits<RawAllocator>;
130
131 public:
132 using segregatable = Segregatable;
133 using segregatable_allocator_type = typename segregatable::allocator_type;
134 using fallback_allocator_type = typename allocator_traits<RawAllocator>::allocator_type;
135
136 /// \effects Creates it by giving the \concept{concept_segregatable,Segregatable}
137 /// and the \concept{concept_rawallocator,RawAllocator}.
138 explicit binary_segregator(segregatable s,
139 fallback_allocator_type fallback = fallback_allocator_type())
140 : detail::ebo_storage<1, fallback_allocator_type>(detail::move(fallback)),
141 s_(detail::move(s))
142 {
143 }
144
145 /// @{
146 /// \effects Uses the \concept{concept_segregatable,Segregatable} to decide which allocator to use.
147 /// Then forwards to the chosen allocator.
148 void* allocate_node(std::size_t size, std::size_t alignment)
149 {
150 if (get_segregatable().use_allocate_node(size, alignment))
151 return segregatable_traits::allocate_node(get_segregatable_allocator(), size,
152 alignment);
153 else
154 return fallback_traits::allocate_node(get_fallback_allocator(), size,
155 alignment);
156 }
157
158 void deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
159 {
160 if (get_segregatable().use_allocate_node(size, alignment))
161 segregatable_traits::deallocate_node(get_segregatable_allocator(), ptr, size,
162 alignment);
163 else
164 fallback_traits::deallocate_node(get_fallback_allocator(), ptr, size,
165 alignment);
166 }
167
168 void* allocate_array(std::size_t count, std::size_t size, std::size_t alignment)
169 {
170 if (get_segregatable().use_allocate_array(count, size, alignment))
171 return segregatable_traits::allocate_array(get_segregatable_allocator(), count,
172 size, alignment);
173 else
174 return fallback_traits::allocate_array(get_fallback_allocator(), count, size,
175 alignment);
176 }
177
178 void deallocate_array(void* array, std::size_t count, std::size_t size,
179 std::size_t alignment) noexcept
180 {
181 if (get_segregatable().use_allocate_array(count, size, alignment))
182 segregatable_traits::deallocate_array(get_segregatable_allocator(), array,
183 count, size, alignment);
184 else
185 fallback_traits::deallocate_array(get_fallback_allocator(), array, count, size,
186 alignment);
187 }
188 /// @}
189
190 /// @{
191 /// \returns The maximum value of the fallback.
192 /// \note It assumes that the fallback will be used for larger allocations,
193 /// and the `Segregatable` for smaller ones.
194 std::size_t max_node_size() const
195 {
196 return fallback_traits::max_node_size(get_fallback_allocator());
197 }
198
199 std::size_t max_array_size() const
200 {
201 return fallback_traits::max_array_size(get_fallback_allocator());
202 }
203
204 std::size_t max_alignment() const
205 {
206 return fallback_traits::max_alignment(get_fallback_allocator());
207 }
208 /// @}
209
210 /// @{
211 /// \returns A reference to the segregatable allocator.
212 /// This is the one primarily used.
213 segregatable_allocator_type& get_segregatable_allocator() noexcept
214 {
215 return get_segregatable().get_allocator();
216 }
217
218 const segregatable_allocator_type& get_segregatable_allocator() const noexcept
219 {
220 return get_segregatable().get_allocator();
221 }
222 /// @}
223
224 /// @{
225 /// \returns A reference to the fallback allocator.
226 /// It will be used if the \concept{concept_segregator,Segregator} doesn't want the allocation.
227 fallback_allocator_type& get_fallback_allocator() noexcept
228 {
229 return detail::ebo_storage<1, fallback_allocator_type>::get();
230 }
231
232 const fallback_allocator_type& get_fallback_allocator() const noexcept
233 {
234 return detail::ebo_storage<1, fallback_allocator_type>::get();
235 }
236 /// @}
237
238 private:
239 segregatable& get_segregatable() noexcept
240 {
241 return s_;
242 }
243
244 segregatable s_;
245 };
246
247 namespace detail
248 {
249 template <class... Segregatables>
250 struct make_segregator_t;
251
252 template <class Segregatable>
253 struct make_segregator_t<Segregatable>
254 {
255 using type = binary_segregator<Segregatable, null_allocator>;
256 };
257
258 template <class Segregatable, class RawAllocator>
259 struct make_segregator_t<Segregatable, RawAllocator>
260 {
261 using type = binary_segregator<Segregatable, RawAllocator>;
262 };
263
264 template <class Segregatable, class... Tail>
265 struct make_segregator_t<Segregatable, Tail...>
266 {
267 using type =
268 binary_segregator<Segregatable, typename make_segregator_t<Tail...>::type>;
269 };
270
271 template <class Segregator, class Fallback = null_allocator>
272 auto make_segregator(Segregator&& seg, Fallback&& f = null_allocator{})
273 -> binary_segregator<typename std::decay<Segregator>::type,
274 typename std::decay<Fallback>::type>
275 {
276 return binary_segregator<
277 typename std::decay<Segregator>::type,
278 typename std::decay<Fallback>::type>(std::forward<Segregator>(seg),
279 std::forward<Fallback>(f));
280 }
281
282 template <class Segregator, typename... Rest>
283 auto make_segregator(Segregator&& seg, Rest&&... rest)
284 -> binary_segregator<typename std::decay<Segregator>::type,
285 decltype(make_segregator(std::forward<Rest>(rest)...))>
286 {
287 return binary_segregator<typename std::decay<Segregator>::type,
288 decltype(make_segregator(std::forward<Rest>(
289 rest)...))>(std::forward<Segregator>(seg),
290 make_segregator(
291 std::forward<Rest>(rest)...));
292 }
293
294 template <std::size_t I, class Segregator>
295 struct segregatable_type;
296
297 template <class Segregator, class Fallback>
298 struct segregatable_type<0, binary_segregator<Segregator, Fallback>>
299 {
300 using type = typename Segregator::allocator_type;
301
302 static type& get(binary_segregator<Segregator, Fallback>& s)
303 {
304 return s.get_segregatable_allocator();
305 }
306
307 static const type& get(const binary_segregator<Segregator, Fallback>& s)
308 {
309 return s.get_segregatable_allocator();
310 }
311 };
312
313 template <std::size_t I, class Segregator, class Fallback>
314 struct segregatable_type<I, binary_segregator<Segregator, Fallback>>
315 {
316 using base = segregatable_type<I - 1, Fallback>;
317 using type = typename base::type;
318
319 static type& get(binary_segregator<Segregator, Fallback>& s)
320 {
321 return base::get(s.get_fallback_allocator());
322 }
323
324 static const type& get(const binary_segregator<Segregator, Fallback>& s)
325 {
326 return base::get(s.get_fallback_allocator());
327 }
328 };
329
330 template <class Fallback>
331 struct fallback_type
332 {
333 using type = Fallback;
334
335 static const std::size_t size = 0u;
336
337 static type& get(Fallback& f)
338 {
339 return f;
340 }
341
342 static const type& get(const Fallback& f)
343 {
344 return f;
345 }
346 };
347
348 template <class Segregator, class Fallback>
349 struct fallback_type<binary_segregator<Segregator, Fallback>>
350 {
351 using base = fallback_type<Fallback>;
352 using type = typename base::type;
353
354 static const std::size_t size = base::size + 1u;
355
356 static type& get(binary_segregator<Segregator, Fallback>& s)
357 {
358 return base::get(s.get_fallback_allocator());
359 }
360
361 static const type& get(const binary_segregator<Segregator, Fallback>& s)
362 {
363 return base::get(s.get_fallback_allocator());
364 }
365 };
366 } // namespace detail
367
368 /// Creates multiple nested \ref binary_segregator.
369 /// If you pass one type, it must be a \concept{concept_segregatable,Segregatable}.
370 /// Then the result is a \ref binary_segregator with that `Segregatable` and \ref null_allocator as fallback.
371 /// If you pass two types, the first one must be a `Segregatable`,
372 /// the second one a \concept{concept_rawallocator,RawAllocator}.
373 /// Then the result is a simple \ref binary_segregator with those arguments.
374 /// If you pass more than two, the last one must be a `RawAllocator` and all others `Segregatable`;
375 /// the result is `binary_segregator<Head, segregator<Tail...>>`.
376 /// \note It will result in an allocator that tries each `Segregatable` in the order specified
377 /// using the last parameter as final fallback.
378 /// \ingroup adapter
379 template <class... Allocators>
380 FOONATHAN_ALIAS_TEMPLATE(segregator,
381 typename detail::make_segregator_t<Allocators...>::type);
382
383 /// \returns A \ref segregator created from the allocators `args`.
384 /// \relates segregator
385 template <typename... Args>
386 auto make_segregator(Args&&... args) -> segregator<typename std::decay<Args>::type...>
387 {
388 return detail::make_segregator(std::forward<Args>(args)...);
389 }
390
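// Usage sketch (editor's addition): a two-level segregator that routes small
// requests to a memory_pool and everything else to the heap. memory_pool
// (memory_pool.hpp) and heap_allocator (heap_allocator.hpp) are assumed here;
// the sizes are arbitrary example values.
#include <foonathan/memory/heap_allocator.hpp>
#include <foonathan/memory/memory_pool.hpp>
#include <foonathan/memory/segregator.hpp>

void segregator_sketch()
{
    using namespace foonathan::memory;
    auto seg = make_segregator(threshold(128u, memory_pool<>(128, 4096)),
                               heap_allocator{});

    void* small = seg.allocate_node(64, 8);  // <= 128 bytes: served by the pool
    void* large = seg.allocate_node(512, 8); // > 128 bytes: heap fallback
    seg.deallocate_node(large, 512, 8);
    seg.deallocate_node(small, 64, 8);
}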
391 /// The number of \concept{concept_segregatable,Segregatable} a \ref segregator has.
392 /// \relates segregator
393 template <class Segregator>
394 struct segregator_size
395 {
396 static const std::size_t value = detail::fallback_type<Segregator>::size;
397 };
398
399 /// The allocator type of the `I`th \concept{concept_segregatable,Segregatable}.
400 /// \relates segregator
401 template <std::size_t I, class Segregator>
402 using segregatable_allocator_type = typename detail::segregatable_type<I, Segregator>::type;
403
404 /// @{
405 /// \returns A reference to the allocator of the `I`th \concept{concept_segregatable,Segregatable}.
406 /// \relates segregator
407 template <std::size_t I, class Segregator, class Fallback>
408 auto get_segregatable_allocator(binary_segregator<Segregator, Fallback>& s)
409 -> segregatable_allocator_type<I, binary_segregator<Segregator, Fallback>>&
410 {
411 return detail::segregatable_type<I, binary_segregator<Segregator, Fallback>>::get(s);
412 }
413
414 template <std::size_t I, class Segregator, class Fallback>
415 auto get_segregatable_allocator(const binary_segregator<Segregator, Fallback>& s)
416 -> const segregatable_allocator_type<I, binary_segregator<Segregator, Fallback>>&
417 {
418 return detail::segregatable_type<I, binary_segregator<Segregator, Fallback>>::get(s);
419 }
420 /// @}
421
422 /// The type of the final fallback \concept{concept_rawallocator,RawAllocator}.
423 /// \relates segregator
424 template <class Segregator>
425 using fallback_allocator_type = typename detail::fallback_type<Segregator>::type;
426
427 /// @{
428 /// \returns The final fallback \concept{concept_rawallocator,RawAllocator}.
429 /// \relates segregator
430 template <class Segregator, class Fallback>
431 auto get_fallback_allocator(binary_segregator<Segregator, Fallback>& s)
432 -> fallback_allocator_type<binary_segregator<Segregator, Fallback>>&
433 {
434 return detail::fallback_type<binary_segregator<Segregator, Fallback>>::get(s);
435 }
436
437 template <class Segregator, class Fallback>
438 auto get_fallback_allocator(const binary_segregator<Segregator, Fallback>& s)
439 -> const fallback_allocator_type<binary_segregator<Segregator, Fallback>>&
440 {
441 return detail::fallback_type<binary_segregator<Segregator, Fallback>>::get(s);
442 }
443 /// @}
444 } // namespace memory
445 } // namespace foonathan
446
447 #endif // FOONATHAN_MEMORY_SEGREGATOR_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_SMART_PTR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_SMART_PTR_HPP_INCLUDED
6
7 /// \file
8 /// \c std::make_unique() / \c std::make_shared() replacement allocating memory through a \concept{concept_rawallocator,RawAllocator}.
9 /// \note Only available on a hosted implementation.
10
11 #include "config.hpp"
12 #if !FOONATHAN_HOSTED_IMPLEMENTATION
13 #error "This header is only available for a hosted implementation."
14 #endif
15
16 #include <memory>
17 #include <type_traits>
18
19 #include "detail/utility.hpp"
20 #include "deleter.hpp"
21 #include "std_allocator.hpp"
22
23 namespace foonathan
24 {
25 namespace memory
26 {
27 namespace detail
28 {
29 template <typename T, class RawAllocator, typename... Args>
30 auto allocate_unique(allocator_reference<RawAllocator> alloc, Args&&... args)
31 -> std::unique_ptr<T, allocator_deleter<T, RawAllocator>>
32 {
33 using raw_ptr = std::unique_ptr<T, allocator_deallocator<T, RawAllocator>>;
34
35 auto memory = alloc.allocate_node(sizeof(T), alignof(T));
36 // raw_ptr deallocates memory in case of constructor exception
37 raw_ptr result(static_cast<T*>(memory), {alloc});
38 // call constructor
39 ::new (memory) T(detail::forward<Args>(args)...);
40 // pass ownership to return value using a deleter that calls destructor
41 return {result.release(), {alloc}};
42 }
43
44 template <typename T, typename... Args>
45 void construct(std::true_type, T* cur, T* end, Args&&... args)
46 {
47 for (; cur != end; ++cur)
48 ::new (static_cast<void*>(cur)) T(detail::forward<Args>(args)...);
49 }
50
51 template <typename T, typename... Args>
52 void construct(std::false_type, T* begin, T* end, Args&&... args)
53 {
54 #if FOONATHAN_HAS_EXCEPTION_SUPPORT
55 auto cur = begin;
56 try
57 {
58 for (; cur != end; ++cur)
59 ::new (static_cast<void*>(cur)) T(detail::forward<Args>(args)...);
60 }
61 catch (...)
62 {
63 for (auto el = begin; el != cur; ++el)
64 el->~T();
65 throw;
66 }
67 #else
68 construct(std::true_type{}, begin, end, detail::forward<Args>(args)...);
69 #endif
70 }
71
72 template <typename T, class RawAllocator>
73 auto allocate_array_unique(std::size_t size, allocator_reference<RawAllocator> alloc)
74 -> std::unique_ptr<T[], allocator_deleter<T[], RawAllocator>>
75 {
76 using raw_ptr = std::unique_ptr<T[], allocator_deallocator<T[], RawAllocator>>;
77
78 auto memory = alloc.allocate_array(size, sizeof(T), alignof(T));
79 // raw_ptr deallocates memory in case of constructor exception
80 raw_ptr result(static_cast<T*>(memory), {alloc, size});
81 construct(std::integral_constant<bool, noexcept(T())>{}, result.get(),
82 result.get() + size);
83 // pass ownership to return value using a deleter that calls destructor
84 return {result.release(), {alloc, size}};
85 }
86 } // namespace detail
87
88 /// A \c std::unique_ptr that deletes using a \concept{concept_rawallocator,RawAllocator}.
89 ///
90 /// It is an alias template using \ref allocator_deleter as \c Deleter class.
91 /// \ingroup adapter
92 template <typename T, class RawAllocator>
93 FOONATHAN_ALIAS_TEMPLATE(unique_ptr,
94 std::unique_ptr<T, allocator_deleter<T, RawAllocator>>);
95
96 /// A \c std::unique_ptr that deletes using a \concept{concept_rawallocator,RawAllocator} and allows polymorphic types.
97 ///
98 /// It can only be created by converting a regular unique pointer that owns an object of a derived class,
99 /// and is meant to be used inside containers.
100 /// It is an alias template using \ref allocator_polymorphic_deleter as \c Deleter class.
101 /// \note It has a relatively high overhead, so only use it if you have to.
102 /// \ingroup adapter
103 template <class BaseType, class RawAllocator>
104 FOONATHAN_ALIAS_TEMPLATE(
105 unique_base_ptr,
106 std::unique_ptr<BaseType, allocator_polymorphic_deleter<BaseType, RawAllocator>>);
107
108 /// Creates a \c std::unique_ptr using a \concept{concept_rawallocator,RawAllocator} for the allocation.
109 /// \effects Allocates memory for the given type using the allocator
110 /// and creates a new object inside it passing the given arguments to its constructor.
111 /// \returns A \c std::unique_ptr owning that memory.
112 /// \note If the allocator is stateful a reference to the \c RawAllocator will be stored inside the deleter,
113 /// the caller has to ensure that the object lives as long as the smart pointer.
114 /// \ingroup adapter
115 template <typename T, class RawAllocator, typename... Args>
116 auto allocate_unique(RawAllocator&& alloc, Args&&... args) -> FOONATHAN_REQUIRES_RET(
117 !std::is_array<T>::value,
118 std::unique_ptr<T, allocator_deleter<T, typename std::decay<RawAllocator>::type>>)
119 {
120 return detail::allocate_unique<T>(make_allocator_reference(
121 detail::forward<RawAllocator>(alloc)),
122 detail::forward<Args>(args)...);
123 }
124
125 /// Creates a \c std::unique_ptr using a type-erased \concept{concept_rawallocator,RawAllocator} for the allocation.
126 /// It is the same as the other overload but stores the reference to the allocator type-erased inside the \c std::unique_ptr.
127 /// \effects Allocates memory for the given type using the allocator
128 /// and creates a new object inside it passing the given arguments to its constructor.
129 /// \returns A \c std::unique_ptr with a type-erased allocator reference owning that memory.
130 /// \note If the allocator is stateful a reference to the \c RawAllocator will be stored inside the deleter,
131 /// the caller has to ensure that the object lives as long as the smart pointer.
132 /// \ingroup adapter
133 template <typename T, class RawAllocator, typename... Args>
134 auto allocate_unique(any_allocator, RawAllocator&& alloc, Args&&... args)
135 -> FOONATHAN_REQUIRES_RET(!std::is_array<T>::value,
136 std::unique_ptr<T, allocator_deleter<T, any_allocator>>)
137 {
138 return detail::allocate_unique<T, any_allocator>(make_allocator_reference(
139 detail::forward<RawAllocator>(
140 alloc)),
141 detail::forward<Args>(args)...);
142 }
143
144 /// Creates a \c std::unique_ptr owning an array using a \concept{concept_rawallocator,RawAllocator} for the allocation.
145 /// \effects Allocates memory for an array of given size and value-initializes each element inside it.
146 /// \returns A \c std::unique_ptr owning that array.
147 /// \note If the allocator is stateful a reference to the \c RawAllocator will be stored inside the deleter,
148 /// the caller has to ensure that the object lives as long as the smart pointer.
149 /// \ingroup adapter
150 template <typename T, class RawAllocator>
151 auto allocate_unique(RawAllocator&& alloc, std::size_t size) -> FOONATHAN_REQUIRES_RET(
152 std::is_array<T>::value,
153 std::unique_ptr<T, allocator_deleter<T, typename std::decay<RawAllocator>::type>>)
154 {
155 return detail::allocate_array_unique<
156 typename std::remove_extent<T>::type>(size,
157 make_allocator_reference(
158 detail::forward<RawAllocator>(alloc)));
159 }
160
161 /// Creates a \c std::unique_ptr owning an array using a type-erased \concept{concept_rawallocator,RawAllocator} for the allocation.
162 /// It is the same as the other overload but stores the reference to the allocator type-erased inside the \c std::unique_ptr.
163 /// \effects Allocates memory for an array of given size and value-initializes each element inside it.
164 /// \returns A \c std::unique_ptr with a type-erased allocator reference owning that array.
165 /// \note If the allocator is stateful a reference to the \c RawAllocator will be stored inside the deleter,
166 /// the caller has to ensure that the object lives as long as the smart pointer.
167 /// \ingroup adapter
168 template <typename T, class RawAllocator>
169 auto allocate_unique(any_allocator, RawAllocator&& alloc, std::size_t size)
170 -> FOONATHAN_REQUIRES_RET(std::is_array<T>::value,
171 std::unique_ptr<T, allocator_deleter<T, any_allocator>>)
172 {
173 return detail::allocate_array_unique<typename std::remove_extent<T>::type,
174 any_allocator>(size,
175 make_allocator_reference(
176 detail::forward<RawAllocator>(
177 alloc)));
178 }
179
180 /// Creates a \c std::shared_ptr using a \concept{concept_rawallocator,RawAllocator} for the allocation.
181 /// It is similar to \c std::allocate_shared but uses a \c RawAllocator (and thus also supports any \c Allocator).
182 /// \effects Calls \ref std_allocator::make_std_allocator to wrap the allocator and forwards to \c std::allocate_shared.
183 /// \returns A \c std::shared_ptr created using \c std::allocate_shared.
184 /// \note If the allocator is stateful a reference to the \c RawAllocator will be stored inside the shared pointer,
185 /// the caller has to ensure that the object lives as long as the smart pointer.
186 /// \ingroup adapter
187 template <typename T, class RawAllocator, typename... Args>
188 std::shared_ptr<T> allocate_shared(RawAllocator&& alloc, Args&&... args)
189 {
190 return std::allocate_shared<T>(make_std_allocator<T>(
191 detail::forward<RawAllocator>(alloc)),
192 detail::forward<Args>(args)...);
193 }
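// Editor's example (a hedged sketch, not part of the original header): basic use of
// the factory functions above, assuming `namespace memory = foonathan::memory;` and
// the library's heap_allocator from heap_allocator.hpp as an example RawAllocator.
//
//   memory::heap_allocator heap;
//   auto obj = memory::allocate_unique<int>(heap, 42);   // single object
//   auto arr = memory::allocate_unique<int[]>(heap, 16); // array of 16 value-initialized ints
//   auto ptr = memory::allocate_shared<int>(heap, 42);   // forwards to std::allocate_shared
//   // heap_allocator is stateless; a stateful allocator would have to outlive
//   // all three smart pointers, as the notes above explain.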
194
195 #if !defined(DOXYGEN)
196 #include "detail/container_node_sizes.hpp"
197 #else
198 /// Contains the node size needed for a `std::shared_ptr`.
199 /// These classes are auto-generated and only available if the tools are built and cross-compiling is not used.
200 /// \ingroup adapter
201 template <typename T>
202 struct shared_ptr_node_size : std::integral_constant<std::size_t, implementation_defined>
203 {
204 };
205 #endif
206 } // namespace memory
207 } // namespace foonathan
208
209 #endif // FOONATHAN_MEMORY_SMART_PTR_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_STATIC_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_STATIC_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Allocators using a static, fixed-sized storage.
9
10 #include <type_traits>
11
12 #include "detail/align.hpp"
13 #include "detail/assert.hpp"
14 #include "detail/memory_stack.hpp"
15 #include "detail/utility.hpp"
16 #include "config.hpp"
17
18 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
19 #include "allocator_traits.hpp"
20 #endif
21
22 namespace foonathan
23 {
24 namespace memory
25 {
26 /// Storage for a \ref static_allocator.
27 /// The constructor of \ref static_allocator takes a reference to this storage and uses it for its allocations.
28 /// The storage type is simply a \c char array aligned for maximum alignment.
29 /// \note It is not allowed to access the memory of the storage.
30 /// \ingroup allocator
31 template <std::size_t Size>
32 struct static_allocator_storage
33 {
34 alignas(detail::max_alignment) char storage[Size];
35 };
36
37 static_assert(sizeof(static_allocator_storage<1024>) == 1024, "");
38 static_assert(alignof(static_allocator_storage<1024>) == detail::max_alignment, "");
39
40 struct allocator_info;
41
42 /// A stateful \concept{concept_rawallocator,RawAllocator} that uses a fixed sized storage for the allocations.
43 /// It works on a \ref static_allocator_storage and uses its memory for all allocations.
44 /// Deallocations are not supported; memory cannot be marked as freed.<br>
45 /// \note It is not allowed to share a \ref static_allocator_storage between multiple \ref static_allocator objects.
46 /// \ingroup allocator
47 class static_allocator
48 {
49 public:
50 using is_stateful = std::true_type;
51
52 /// \effects Creates it by passing it a \ref static_allocator_storage by reference.
53 /// It will take the address of the storage and use its memory for the allocation.
54 /// \requires The storage object must live as long as the allocator object.
55 /// It must not be shared between multiple allocators,
56 /// i.e. the object must not have been passed to a constructor before.
57 template <std::size_t Size>
58 static_allocator(static_allocator_storage<Size>& storage) noexcept
59 : stack_(&storage), end_(stack_.top() + Size)
60 {
61 }
62
63 /// \effects A \concept{concept_rawallocator,RawAllocator} allocation function.
64 /// It uses the specified \ref static_allocator_storage.
65 /// \returns A pointer to a \concept{concept_node,node}, it will never be \c nullptr.
66 /// \throws An exception of type \ref out_of_memory or whatever is thrown by its handler if the storage is exhausted.
67 void* allocate_node(std::size_t size, std::size_t alignment);
68
69 /// \effects A \concept{concept_rawallocator,RawAllocator} deallocation function.
70 /// It does nothing, deallocation is not supported by this allocator.
71 void deallocate_node(void*, std::size_t, std::size_t) noexcept {}
72
73 /// \returns The maximum node size, which is the capacity remaining inside the \ref static_allocator_storage.
74 std::size_t max_node_size() const noexcept
75 {
76 return static_cast<std::size_t>(end_ - stack_.top());
77 }
78
79 /// \returns The maximum possible value since there is no alignment restriction
80 /// (except indirectly through the size of the \ref static_allocator_storage).
81 std::size_t max_alignment() const noexcept
82 {
83 return std::size_t(-1);
84 }
85
86 private:
87 allocator_info info() const noexcept;
88
89 detail::fixed_memory_stack stack_;
90 const char* end_;
91 };
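// Editor's example (a hedged sketch, not part of the original header):
//
//   memory::static_allocator_storage<1024> storage; // 1KiB, aligned for max_alignment
//   memory::static_allocator alloc(storage);        // storage must outlive alloc
//   void* node = alloc.allocate_node(64, alignof(double));
//   // deallocate_node() is a no-op; memory is only reclaimed together with `storage`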
92
93 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
94 extern template class allocator_traits<static_allocator>;
95 #endif
96
97 struct memory_block;
98
99 /// A \concept{concept_blockallocator,BlockAllocator} that allocates the blocks from a fixed size storage.
100 /// It works on a \ref static_allocator_storage and uses it for all allocations,
101 /// deallocations are only allowed in reversed order which is guaranteed by \ref memory_arena.
102 /// \note It is not allowed to share a \ref static_allocator_storage between multiple allocator objects.
103 /// \ingroup allocator
104 class static_block_allocator
105 {
106 public:
107 /// \effects Creates it by passing it the block size and a \ref static_allocator_storage by reference.
108 /// It will take the address of the storage and use it to allocate \c block_size'd blocks.
109 /// \requires The storage object must live as long as the allocator object.
110 /// It must not be shared between multiple allocators,
111 /// i.e. the object must not have been passed to a constructor before.
112 /// The size of the \ref static_allocator_storage must be a multiple of the (non-null) block size.
113 template <std::size_t Size>
114 static_block_allocator(std::size_t block_size,
115 static_allocator_storage<Size>& storage) noexcept
116 : cur_(static_cast<char*>(static_cast<void*>(&storage))),
117 end_(cur_ + Size),
118 block_size_(block_size)
119 {
120 FOONATHAN_MEMORY_ASSERT(block_size <= Size);
121 FOONATHAN_MEMORY_ASSERT(Size % block_size == 0u);
122 }
123
124 ~static_block_allocator() noexcept = default;
125
126 /// @{
127 /// \effects Moves the block allocator; it transfers ownership over the \ref static_allocator_storage.
128 /// This does not invalidate any memory blocks.
129 static_block_allocator(static_block_allocator&& other) noexcept
130 : cur_(other.cur_), end_(other.end_), block_size_(other.block_size_)
131 {
132 other.cur_ = other.end_ = nullptr;
133 other.block_size_ = 0;
134 }
135
136 static_block_allocator& operator=(static_block_allocator&& other) noexcept
137 {
138 static_block_allocator tmp(detail::move(other));
139 swap(*this, tmp);
140 return *this;
141 }
142 /// @}
143
144 /// \effects Swaps the ownership over the \ref static_allocator_storage.
145 /// This does not invalidate any memory blocks.
146 friend void swap(static_block_allocator& a, static_block_allocator& b) noexcept
147 {
148 detail::adl_swap(a.cur_, b.cur_);
149 detail::adl_swap(a.end_, b.end_);
150 detail::adl_swap(a.block_size_, b.block_size_);
151 }
152
153 /// \effects Allocates a new block consisting of the next \ref next_block_size() bytes of the storage.
154 /// \returns The new memory block.
155 memory_block allocate_block();
156
157 /// \effects Deallocates the last memory block by marking the block as free again.
158 /// This block will be returned again by the next call to \ref allocate_block().
159 /// \requires \c block must be the current top block of the memory,
160 /// this is guaranteed by \ref memory_arena.
161 void deallocate_block(memory_block block) noexcept;
162
163 /// \returns The next block size, which is the size passed to the constructor.
164 std::size_t next_block_size() const noexcept
165 {
166 return block_size_;
167 }
168
169 private:
170 allocator_info info() const noexcept;
171
172 char * cur_, *end_;
173 std::size_t block_size_;
174 };
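// Editor's example (a hedged sketch, not part of the original header); note that
// the storage size must be a multiple of the block size, as asserted in the constructor.
//
//   memory::static_allocator_storage<4096> storage;
//   memory::static_block_allocator block_alloc(1024u, storage); // four 1KiB blocks
//   auto block = block_alloc.allocate_block();                  // block.size == 1024
//   block_alloc.deallocate_block(block); // only the most recently allocated block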
175 } // namespace memory
176 } // namespace foonathan
177
178 #endif //FOONATHAN_MEMORY_STATIC_ALLOCATOR_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_STD_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_STD_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::std_allocator and related classes and functions.
9
10 #include <new>
11 #include <type_traits>
12
13 #include "detail/utility.hpp"
14 #include "config.hpp"
15 #include "allocator_storage.hpp"
16 #include "threading.hpp"
17
18 namespace foonathan
19 {
20 namespace memory
21 {
22 namespace traits_detail
23 {
24 template <class RawAllocator>
25 auto propagate_on_container_swap(std_concept) ->
26 typename RawAllocator::propagate_on_container_swap;
27
28 template <class RawAllocator>
29 auto propagate_on_container_swap(min_concept) -> std::true_type;
30
31 template <class RawAllocator>
32 auto propagate_on_container_move_assignment(std_concept) ->
33 typename RawAllocator::propagate_on_container_move_assignment;
34
35 template <class RawAllocator>
36 auto propagate_on_container_move_assignment(min_concept) -> std::true_type;
37
38 template <class RawAllocator>
39 auto propagate_on_container_copy_assignment(std_concept) ->
40 typename RawAllocator::propagate_on_container_copy_assignment;
41
42 template <class RawAllocator>
43 auto propagate_on_container_copy_assignment(min_concept) -> std::true_type;
44 } // namespace traits_detail
45
46 /// Controls the propagation of a \ref std_allocator for a certain \concept{concept_rawallocator,RawAllocator}.
47 /// \ingroup adapter
48 template <class RawAllocator>
49 struct propagation_traits
50 {
51 using propagate_on_container_swap =
52 decltype(traits_detail::propagate_on_container_swap<RawAllocator>(
53 traits_detail::full_concept{}));
54
55 using propagate_on_container_move_assignment =
56 decltype(traits_detail::propagate_on_container_move_assignment<RawAllocator>(
57 traits_detail::full_concept{}));
58
59 using propagate_on_container_copy_assignment =
60 decltype(traits_detail::propagate_on_container_copy_assignment<RawAllocator>(
61 traits_detail::full_concept{}));
62
63 template <class AllocReference>
64 static AllocReference select_on_container_copy_construction(const AllocReference& alloc)
65 {
66 return alloc;
67 }
68 };
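// Editor's sketch (hypothetical allocator, not part of the library): a RawAllocator
// overrides the defaults simply by providing the corresponding typedefs, which the
// traits_detail overloads above detect.
//
//   struct pinned_allocator // hypothetical
//   {
//       using propagate_on_container_swap            = std::false_type;
//       using propagate_on_container_move_assignment = std::false_type;
//       using propagate_on_container_copy_assignment = std::false_type;
//       // ... rest of the RawAllocator interface ...
//   };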
69
70 /// Wraps a \concept{concept_rawallocator,RawAllocator} and makes it a "normal" \c Allocator.
71 /// It allows using a \c RawAllocator anywhere an \c Allocator is required.
72 /// \ingroup adapter
73 template <typename T, class RawAllocator>
74 class std_allocator :
75 #if defined _MSC_VER && defined __clang__
76 FOONATHAN_EBO(protected allocator_reference<RawAllocator>)
77 #else
78 FOONATHAN_EBO(allocator_reference<RawAllocator>)
79 #endif
80 {
81 using alloc_reference = allocator_reference<RawAllocator>;
82 // if it is any_allocator_reference an optimized implementation can be used
83 using is_any = std::is_same<alloc_reference, any_allocator_reference>;
84
85 using prop_traits = propagation_traits<RawAllocator>;
86
87 public:
88 //=== typedefs ===//
89 using value_type = T;
90 using pointer = T*;
91 using const_pointer = const T*;
92 using reference = T&;
93 using const_reference = const T&;
94 using size_type = std::size_t;
95 using difference_type = std::ptrdiff_t;
96
97 using propagate_on_container_swap = typename prop_traits::propagate_on_container_swap;
98 using propagate_on_container_move_assignment =
99 typename prop_traits::propagate_on_container_move_assignment;
100 using propagate_on_container_copy_assignment =
101 typename prop_traits::propagate_on_container_copy_assignment;
102
103 template <typename U>
104 struct rebind
105 {
106 using other = std_allocator<U, RawAllocator>;
107 };
108
109 using allocator_type = typename alloc_reference::allocator_type;
110
111 //=== constructor ===//
112 /// \effects Default constructs it by storing a default constructed, stateless \c RawAllocator inside the reference.
113 /// \requires The \c RawAllocator type is stateless, otherwise the body of this function will not compile.
114 std_allocator() noexcept : alloc_reference(allocator_type{})
115 {
116 #if !defined(__GNUC__) || (defined(_GLIBCXX_USE_CXX11_ABI) && _GLIBCXX_USE_CXX11_ABI != 0)
117 // std::string requires a default constructor for the small string optimization when using gcc's old ABI,
118 // so don't assert in that case to allow the joint allocator
119 static_assert(!alloc_reference::is_stateful::value,
120 "default constructor must not be used for stateful allocators");
121 #endif
122 }
123
124 /// \effects Creates it from a reference to a \c RawAllocator.
125 /// It will store an \ref allocator_reference to it.
126 /// \requires The expression <tt>allocator_reference<RawAllocator>(alloc)</tt> is well-formed,
127 /// that is either \c RawAlloc is the same as \c RawAllocator or \c RawAllocator is the tag type \ref any_allocator.
128 /// If the requirement is not fulfilled this function does not participate in overload resolution.
129 /// \note The caller has to ensure that the lifetime of the \c RawAllocator is at least as long as the lifetime
130 /// of this \ref std_allocator object.
131 template <
132 class RawAlloc,
133 // MSVC seems to ignore access rights in decltype SFINAE below
134 // use this to prevent this constructor being chosen instead of move/copy for types inheriting from it
135 FOONATHAN_REQUIRES((!std::is_base_of<std_allocator, RawAlloc>::value))>
136 std_allocator(RawAlloc& alloc, FOONATHAN_SFINAE(alloc_reference(alloc))) noexcept
137 : alloc_reference(alloc)
138 {
139 }
140
141 /// \effects Creates it from a stateless, temporary \c RawAllocator object.
142 /// It will not store a reference but create it on the fly.
143 /// \requires The \c RawAllocator is stateless
144 /// and the expression <tt>allocator_reference<RawAllocator>(alloc)</tt> is well-formed as above,
145 /// otherwise this function does not participate in overload resolution.
146 template <
147 class RawAlloc,
148 // MSVC seems to ignore access rights in decltype SFINAE below
149 // use this to prevent this constructor being chosen instead of move/copy for types inheriting from it
150 FOONATHAN_REQUIRES((!std::is_base_of<std_allocator, RawAlloc>::value))>
151 std_allocator(const RawAlloc& alloc, FOONATHAN_SFINAE(alloc_reference(alloc))) noexcept
152 : alloc_reference(alloc)
153 {
154 }
155
156 /// \effects Creates it from another \ref allocator_reference using the same allocator type.
157 std_allocator(const alloc_reference& alloc) noexcept : alloc_reference(alloc) {}
158
159 /// \details Implicit conversion from any other \ref allocator_storage is forbidden
160 /// to prevent accidentally wrapping another \ref allocator_storage inside a \ref allocator_reference.
161 template <class StoragePolicy, class OtherMut>
162 std_allocator(const allocator_storage<StoragePolicy, OtherMut>&) = delete;
163
164 /// @{
165 /// \effects Creates it from another \ref std_allocator allocating a different type.
166 /// This is required by the \c Allocator concept and simply takes the same \ref allocator_reference.
167 template <typename U>
168 std_allocator(const std_allocator<U, RawAllocator>& alloc) noexcept
169 : alloc_reference(alloc)
170 {
171 }
172
173 template <typename U>
174 std_allocator(std_allocator<U, RawAllocator>& alloc) noexcept : alloc_reference(alloc)
175 {
176 }
177 /// @}
178
179 /// \returns A copy of the allocator.
180 /// This is required by the \c Allocator concept and forwards to the \ref propagation_traits.
181 std_allocator<T, RawAllocator> select_on_container_copy_construction() const
182 {
183 return prop_traits::select_on_container_copy_construction(*this);
184 }
185
186 //=== allocation/deallocation ===//
187 /// \effects Allocates memory using the underlying \concept{concept_rawallocator,RawAllocator}.
188 /// If \c n is \c 1, it will call <tt>allocate_node(sizeof(T), alignof(T))</tt>,
189 /// otherwise <tt>allocate_array(n, sizeof(T), alignof(T))</tt>.
190 /// \returns A pointer to a memory block suitable for \c n objects of type \c T.
191 /// \throws Anything thrown by the \c RawAllocator.
192 pointer allocate(size_type n, void* = nullptr)
193 {
194 return static_cast<pointer>(allocate_impl(is_any{}, n));
195 }
196
197 /// \effects Deallocates memory using the underlying \concept{concept_rawallocator,RawAllocator}.
198 /// It will forward to the deallocation function in the same way as in \ref allocate().
199 /// \requires The pointer must come from a previous call to \ref allocate() with the same \c n on this object or any copy of it.
200 void deallocate(pointer p, size_type n) noexcept
201 {
202 deallocate_impl(is_any{}, p, n);
203 }
204
205 //=== construction/destruction ===//
206 /// \effects Creates an object of type \c U at given address using the passed arguments.
207 template <typename U, typename... Args>
208 void construct(U* p, Args&&... args)
209 {
210 void* mem = p;
211 ::new (mem) U(detail::forward<Args>(args)...);
212 }
213
214 /// \effects Calls the destructor for an object of type \c U at given address.
215 template <typename U>
216 void destroy(U* p) noexcept
217 {
218 // This is to avoid a MSVS 2015 'unreferenced formal parameter' warning
219 (void)p;
220 p->~U();
221 }
222
223 //=== getter ===//
224 /// \returns The maximum size for an allocation, which is <tt>max_array_size() / sizeof(value_type)</tt>.
225 /// This is only an upper bound, not the exact maximum.
226 size_type max_size() const noexcept
227 {
228 return this->max_array_size() / sizeof(value_type);
229 }
230
231 /// @{
232 /// \effects Returns a reference to the referenced allocator.
233 /// \returns For stateful allocators: A (\c const) reference to the stored allocator.
234 /// For stateless allocators: A temporary constructed allocator.
235 auto get_allocator() noexcept
236 -> decltype(std::declval<alloc_reference>().get_allocator())
237 {
238 return alloc_reference::get_allocator();
239 }
240
241 auto get_allocator() const noexcept
242 -> decltype(std::declval<const alloc_reference>().get_allocator())
243 {
244 return alloc_reference::get_allocator();
245 }
246 /// @}
247
248 private:
249 // any_allocator_reference: use virtual function which already does a dispatch on node/array
250 void* allocate_impl(std::true_type, size_type n)
251 {
252 return get_allocator().allocate_impl(n, sizeof(T), alignof(T));
253 }
254
255 void deallocate_impl(std::true_type, void* ptr, size_type n)
256 {
257 get_allocator().deallocate_impl(ptr, n, sizeof(T), alignof(T));
258 }
259
260 // alloc_reference: decide between node/array
261 void* allocate_impl(std::false_type, size_type n)
262 {
263 if (n == 1)
264 return this->allocate_node(sizeof(T), alignof(T));
265 else
266 return this->allocate_array(n, sizeof(T), alignof(T));
267 }
268
269 void deallocate_impl(std::false_type, void* ptr, size_type n)
270 {
271 if (n == 1)
272 this->deallocate_node(ptr, sizeof(T), alignof(T));
273 else
274 this->deallocate_array(ptr, n, sizeof(T), alignof(T));
275 }
276
277 template <typename U> // stateful
278 bool equal_to_impl(std::true_type,
279 const std_allocator<U, RawAllocator>& other) const noexcept
280 {
281 return &get_allocator() == &other.get_allocator();
282 }
283
284 template <typename U> // non-stateful
285 bool equal_to_impl(std::false_type,
286 const std_allocator<U, RawAllocator>&) const noexcept
287 {
288 return true;
289 }
290
291 template <typename U> // shared
292 bool equal_to(std::true_type,
293 const std_allocator<U, RawAllocator>& other) const noexcept
294 {
295 return get_allocator() == other.get_allocator();
296 }
297
298 template <typename U> // not shared
299 bool equal_to(std::false_type,
300 const std_allocator<U, RawAllocator>& other) const noexcept
301 {
302 return equal_to_impl(typename allocator_traits<RawAllocator>::is_stateful{}, other);
303 }
304
305 template <typename T1, typename T2, class Impl>
306 friend bool operator==(const std_allocator<T1, Impl>& lhs,
307 const std_allocator<T2, Impl>& rhs) noexcept;
308
309 template <typename U, class OtherRawAllocator>
310 friend class std_allocator;
311 };
312
313 /// \effects Compares two \ref std_allocator objects; they are equal if they are either stateless or reference the same allocator.
314 /// \returns The result of the comparison for equality.
315 /// \relates std_allocator
316 template <typename T, typename U, class Impl>
317 bool operator==(const std_allocator<T, Impl>& lhs,
318 const std_allocator<U, Impl>& rhs) noexcept
319 {
320 return lhs.equal_to(is_shared_allocator<Impl>{}, rhs);
321 }
322
323 /// \effects Compares two \ref std_allocator objects; they are equal if they are either stateless or reference the same allocator.
324 /// \returns The result of the comparison for inequality.
325 /// \relates std_allocator
326 template <typename T, typename U, class Impl>
327 bool operator!=(const std_allocator<T, Impl>& lhs,
328 const std_allocator<U, Impl>& rhs) noexcept
329 {
330 return !(lhs == rhs);
331 }
332
333 /// \returns A new \ref std_allocator for a given type using a certain allocator object.
334 /// \relates std_allocator
335 template <typename T, class RawAllocator>
336 auto make_std_allocator(RawAllocator&& allocator) noexcept
337 -> std_allocator<T, typename std::decay<RawAllocator>::type>
338 {
339 return {detail::forward<RawAllocator>(allocator)};
340 }
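// Editor's example (a hedged sketch, not part of the original header; requires
// <vector>): plugging a RawAllocator into a standard container.
//
//   memory::static_allocator_storage<4096> storage;
//   memory::static_allocator alloc(storage);
//   std::vector<int, memory::std_allocator<int, memory::static_allocator>> vec(alloc);
//   vec.push_back(42); // allocates from `storage`; `alloc` must outlive `vec`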
341
342 /// An alias template for \ref std_allocator using a type-erased \concept{concept_rawallocator,RawAllocator}.
343 /// This is the same as using a \ref std_allocator with the tag type \ref any_allocator.
344 /// The implementation is optimized to call fewer virtual functions.
345 /// \ingroup adapter
346 template <typename T>
347 FOONATHAN_ALIAS_TEMPLATE(any_std_allocator, std_allocator<T, any_allocator>);
348
349 /// \returns A new \ref any_std_allocator for a given type using a certain allocator object.
350 /// \relates any_std_allocator
351 template <typename T, class RawAllocator>
352 any_std_allocator<T> make_any_std_allocator(RawAllocator&& allocator) noexcept
353 {
354 return {detail::forward<RawAllocator>(allocator)};
355 }
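// Editor's example (a hedged sketch, not part of the original header): with the
// type-erased variant, containers over different RawAllocator types share one type.
//
//   memory::static_allocator_storage<4096> storage;
//   memory::static_allocator alloc(storage);
//   std::vector<int, memory::any_std_allocator<int>> vec(memory::make_any_std_allocator<int>(alloc));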
356 } // namespace memory
357 } // namespace foonathan
358
359 #endif // FOONATHAN_MEMORY_STD_ALLOCATOR_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_TEMPORARY_ALLOCATOR_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_TEMPORARY_ALLOCATOR_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::temporary_allocator and related functions.
9
10 #include "config.hpp"
11 #include "memory_stack.hpp"
12
13 #if FOONATHAN_MEMORY_TEMPORARY_STACK_MODE >= 2
14 #include <atomic>
15 #endif
16
17 namespace foonathan
18 {
19 namespace memory
20 {
21 class temporary_allocator;
22 class temporary_stack;
23
24 namespace detail
25 {
26 class temporary_block_allocator
27 {
28 public:
29 explicit temporary_block_allocator(std::size_t block_size) noexcept;
30
31 memory_block allocate_block();
32
33 void deallocate_block(memory_block block);
34
35 std::size_t next_block_size() const noexcept
36 {
37 return block_size_;
38 }
39
40 using growth_tracker = void (*)(std::size_t size);
41
42 growth_tracker set_growth_tracker(growth_tracker t) noexcept;
43
44 growth_tracker get_growth_tracker() noexcept;
45
46 private:
47 growth_tracker tracker_;
48 std::size_t block_size_;
49 };
50
51 using temporary_stack_impl = memory_stack<temporary_block_allocator>;
52
53 class temporary_stack_list;
54
55 #if FOONATHAN_MEMORY_TEMPORARY_STACK_MODE >= 2
56 class temporary_stack_list_node
57 {
58 public:
59 // doesn't add into list
60 temporary_stack_list_node() noexcept : in_use_(true) {}
61
62 temporary_stack_list_node(int) noexcept;
63
64 ~temporary_stack_list_node() noexcept {}
65
66 private:
67 temporary_stack_list_node* next_ = nullptr;
68 std::atomic<bool> in_use_;
69
70 friend temporary_stack_list;
71 };
72
73 static class temporary_allocator_dtor_t
74 {
75 public:
76 temporary_allocator_dtor_t() noexcept;
77 ~temporary_allocator_dtor_t() noexcept;
78 } temporary_allocator_dtor;
79 #else
80 class temporary_stack_list_node
81 {
82 protected:
83 temporary_stack_list_node() noexcept {}
84
85 temporary_stack_list_node(int) noexcept {}
86
87 ~temporary_stack_list_node() noexcept {}
88 };
89 #endif
90 } // namespace detail
91
92 /// A wrapper around the \ref memory_stack that is used by the \ref temporary_allocator.
93 /// There should be at least one per thread.
94 /// \ingroup allocator
95 class temporary_stack : FOONATHAN_EBO(detail::temporary_stack_list_node)
96 {
97 public:
98 /// The type of the handler called when the internal \ref memory_stack grows.
99 /// It gets the size of the new block that will be allocated.
100 /// \requiredbe The handler shall log the growth, throw an exception or abort the program.
101 /// If this function does not return, the growth is prevented, but the allocator is unusable until memory is freed.
102 /// \defaultbe The default handler does nothing.
103 using growth_tracker = detail::temporary_block_allocator::growth_tracker;
104
105 /// \effects Sets \c h as the new \ref growth_tracker.
106 /// A \c nullptr sets the default \ref growth_tracker.
107 /// Each thread has its own, separate tracker.
108 /// \returns The previous \ref growth_tracker. This is never \c nullptr.
109 growth_tracker set_growth_tracker(growth_tracker t) noexcept
110 {
111 return stack_.get_allocator().set_growth_tracker(t);
112 }
113
114 /// \returns The current \ref growth_tracker. This is never \c nullptr.
115 growth_tracker get_growth_tracker() noexcept
116 {
117 return stack_.get_allocator().get_growth_tracker();
118 }
119
120 /// \effects Creates it with a given initial size of the stack.
121 /// It can grow if needed, although that is expensive.
122 /// \requires `initial_size` must be greater than `0`.
123 explicit temporary_stack(std::size_t initial_size) : stack_(initial_size), top_(nullptr)
124 {
125 }
126
127 /// \returns `next_capacity()` of the internal `memory_stack`.
128 std::size_t next_capacity() const noexcept
129 {
130 return stack_.next_capacity();
131 }
132
133 private:
134 temporary_stack(int i, std::size_t initial_size)
135 : detail::temporary_stack_list_node(i), stack_(initial_size), top_(nullptr)
136 {
137 }
138
139 using marker = detail::temporary_stack_impl::marker;
140
141 marker top() const noexcept
142 {
143 return stack_.top();
144 }
145
146 void unwind(marker m) noexcept
147 {
148 stack_.unwind(m);
149 }
150
151 detail::temporary_stack_impl stack_;
152 temporary_allocator* top_;
153
154 #if !defined(DOXYGEN)
155 friend temporary_allocator;
156 friend memory_stack_raii_unwind<temporary_stack>;
157 friend detail::temporary_stack_list;
158 #endif
159 };
160
161 /// Manually takes care of the lifetime of the per-thread \ref temporary_stack.
162 /// The constructor will create it, if not already done, and the destructor will destroy it, if not already done.
163 /// \note If there are multiple objects in a thread,
164 /// this will lead to unnecessary construction and destruction of the stack.
165 /// It is thus advised to create one object in the top-level function of the thread, e.g. in `main()`.
166 /// \note If `FOONATHAN_MEMORY_TEMPORARY_STACK_MODE == 2`, it is not necessary to use this class;
167 /// the nifty counter will clean everything up upon program termination.
168 /// But it can still be used as an optimization if you have a thread that is terminated long before program exit.
169 /// The automatic cleanup would otherwise only occur much later.
170 /// \note If `FOONATHAN_MEMORY_TEMPORARY_STACK_MODE == 0`, the use of this class has no effect,
171 /// because the per-thread stack is disabled.
172 /// \relatesalso temporary_stack
173 class temporary_stack_initializer
174 {
175 public:
176 static constexpr std::size_t default_stack_size = 4096u;
177
178 static const struct defer_create_t
179 {
180 defer_create_t() noexcept {}
181 } defer_create;
182
183 /// \effects Does not create the per-thread stack.
184 /// It will be created by the first call to \ref get_temporary_stack() in the current thread.
185 /// \note If `FOONATHAN_MEMORY_TEMPORARY_STACK_MODE == 0`, this function has no effect.
186 temporary_stack_initializer(defer_create_t) noexcept {}
187
188 /// \effects Creates the per-thread stack with the given default size if it wasn't already created.
189 /// \requires `initial_size` must not be `0` if `FOONATHAN_MEMORY_TEMPORARY_STACK_MODE != 0`.
190 /// \note If `FOONATHAN_MEMORY_TEMPORARY_STACK_MODE == 0`, this function will issue a warning in debug mode.
191 /// This can be disabled by passing `0` as the initial size.
192 temporary_stack_initializer(std::size_t initial_size = default_stack_size);
193
194 /// \effects Destroys the per-thread stack if it isn't already destroyed.
195 ~temporary_stack_initializer() noexcept;
196
197 temporary_stack_initializer(temporary_stack_initializer&&) = delete;
198 temporary_stack_initializer& operator=(temporary_stack_initializer&&) = delete;
199 };
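// Editor's example (a hedged sketch, not part of the original header): creating the
// initializer at the top of a thread's entry point bounds the stack's lifetime.
//
//   int main()
//   {
//       memory::temporary_stack_initializer stack; // creates the per-thread stack eagerly
//       // ... code using temporary_allocator ...
//   } // per-thread stack destroyed here instead of (much) later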
200
201 /// \effects Creates the per-thread \ref temporary_stack with the given initial size,
202 /// if it wasn't already created.
203 /// \returns The per-thread \ref temporary_stack.
204 /// \requires There must be a per-thread temporary stack (\ref FOONATHAN_MEMORY_TEMPORARY_STACK_MODE must not be equal to `0`).
205 /// \note If \ref FOONATHAN_MEMORY_TEMPORARY_STACK_MODE is equal to `1`,
206 /// this function can create the temporary stack.
207 /// But if there is no \ref temporary_stack_initializer, it won't be destroyed.
208 /// \relatesalso temporary_stack
209 temporary_stack& get_temporary_stack(
210 std::size_t initial_size = temporary_stack_initializer::default_stack_size);
211
212 /// A stateful \concept{concept_rawallocator,RawAllocator} that handles temporary allocations.
213 /// It works similarly to \c alloca() but uses a separate \ref memory_stack for the allocations,
214 /// instead of the actual program stack.
215 /// This avoids the risk of stack overflow and is portable,
216 /// at a similar speed.
217 /// All allocations done in the scope of the allocator object are automatically freed when the object is destroyed.
218 /// \ingroup allocator
219 class temporary_allocator
220 {
221 public:
222 /// \effects Creates it by using the \ref get_temporary_stack() to get the temporary stack.
223 /// \requires There must be a per-thread temporary stack (\ref FOONATHAN_MEMORY_TEMPORARY_STACK_MODE must not be equal to `0`).
224 temporary_allocator();
225
226 /// \effects Creates it by giving it the \ref temporary_stack it uses for allocation.
227 explicit temporary_allocator(temporary_stack& stack);
228
229 ~temporary_allocator() noexcept;
230
231 temporary_allocator(temporary_allocator&&) = delete;
232 temporary_allocator& operator=(temporary_allocator&&) = delete;
233
234 /// \effects Allocates memory from the internal \ref memory_stack by forwarding to it.
235 /// \returns The result of \ref memory_stack::allocate().
236 /// \requires `is_active()` must return `true`.
237 void* allocate(std::size_t size, std::size_t alignment);
238
239 /// \returns Whether or not the allocator object is active.
240 /// \note The active allocator object is the last object created for one stack.
241 /// Moving changes the active allocator.
242 bool is_active() const noexcept;
243
244 /// \effects Instructs it to release unnecessary memory after automatic unwinding occurs.
245 /// This will effectively forward to \ref memory_stack::shrink_to_fit() of the internal stack.
246 /// \note Like the use of the \ref temporary_stack_initializer this can be used as an optimization,
247 /// to tell when the thread's \ref temporary_stack isn't needed anymore and can be destroyed.
248 /// \note It doesn't call shrink to fit immediately, only in the destructor!
249 void shrink_to_fit() noexcept;
250
251 /// \returns The internal stack the temporary allocator is using.
252 /// \requires `is_active()` must return `true`.
253 temporary_stack& get_stack() const noexcept
254 {
255 return unwind_.get_stack();
256 }
257
258 private:
259 memory_stack_raii_unwind<temporary_stack> unwind_;
260 temporary_allocator* prev_;
261 bool shrink_to_fit_;
262 };
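// Editor's example (a hedged sketch, not part of the original header):
//
//   void process()
//   {
//       memory::temporary_allocator alloc; // uses (and possibly creates) the per-thread stack
//       void* buffer = alloc.allocate(1024, alignof(double));
//       // ... use buffer like a dynamically sized alloca() buffer ...
//   } // destructor unwinds the stack, freeing all allocations made here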
263
264 template <class Allocator>
265 class allocator_traits;
266
267 /// Specialization of the \ref allocator_traits for \ref temporary_allocator classes.
268 /// \note It is not allowed to mix calls through the specialization and through the member functions,
269 /// i.e. \ref temporary_allocator::allocate() and this \c allocate_node().
270 /// \ingroup allocator
271 template <>
272 class allocator_traits<temporary_allocator>
273 {
274 public:
275 using allocator_type = temporary_allocator;
276 using is_stateful = std::true_type;
277
278 /// \returns The result of \ref temporary_allocator::allocate().
279 static void* allocate_node(allocator_type& state, std::size_t size,
280 std::size_t alignment)
281 {
282 detail::check_allocation_size<bad_node_size>(size,
283 [&] { return max_node_size(state); },
284 {FOONATHAN_MEMORY_LOG_PREFIX
285 "::temporary_allocator",
286 &state});
287 return state.allocate(size, alignment);
288 }
289
290 /// \returns The result of \ref temporary_allocator::allocate().
291 static void* allocate_array(allocator_type& state, std::size_t count, std::size_t size,
292 std::size_t alignment)
293 {
294 return allocate_node(state, count * size, alignment);
295 }
296
297 /// @{
298 /// \effects Does nothing besides bookmarking for leak checking, if that is enabled.
299 /// Actual deallocation will be done automatically if the allocator object goes out of scope.
300 static void deallocate_node(const allocator_type&, void*, std::size_t,
301 std::size_t) noexcept
302 {
303 }
304
305 static void deallocate_array(const allocator_type&, void*, std::size_t, std::size_t,
306 std::size_t) noexcept
307 {
308 }
309 /// @}
310
311 /// @{
312 /// \returns The maximum size which is \ref memory_stack::next_capacity() of the internal stack.
313 static std::size_t max_node_size(const allocator_type& state) noexcept
314 {
315 return state.get_stack().next_capacity();
316 }
317
318 static std::size_t max_array_size(const allocator_type& state) noexcept
319 {
320 return max_node_size(state);
321 }
322 /// @}
323
324 /// \returns The maximum possible value since there is no alignment restriction
325 /// (except indirectly through \ref memory_stack::next_capacity()).
326 static std::size_t max_alignment(const allocator_type&) noexcept
327 {
328 return std::size_t(-1);
329 }
330 };
331 } // namespace memory
332 } // namespace foonathan
333
334 #endif // FOONATHAN_MEMORY_TEMPORARY_ALLOCATOR_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_THREADING_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_THREADING_HPP_INCLUDED
6
7 /// \file
8 /// The mutex types.
9
10 #include <type_traits>
11
12 #include "allocator_traits.hpp"
13 #include "config.hpp"
14
15 #if FOONATHAN_HOSTED_IMPLEMENTATION
16 #include <mutex>
17 #endif
18
19 namespace foonathan
20 {
21 namespace memory
22 {
23 /// A dummy \c Mutex class that does not lock anything.
24 /// It is a valid \c Mutex and can be used to disable locking anywhere a \c Mutex is requested.
25 /// \ingroup core
26 struct no_mutex
27 {
28 void lock() noexcept {}
29
30 bool try_lock() noexcept
31 {
32 return true;
33 }
34
35 void unlock() noexcept {}
36 };
37
38 /// Specifies whether or not a \concept{concept_rawallocator,RawAllocator} is thread safe as-is.
39 /// This allows using \ref no_mutex as an optimization.
40 /// Note that stateless allocators are implicitly thread-safe.
41 /// Specialize it only for your own stateful allocators.
42 /// \ingroup core
43 template <class RawAllocator>
44 struct is_thread_safe_allocator
45 : std::integral_constant<bool, !allocator_traits<RawAllocator>::is_stateful::value>
46 {
47 };
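// Editor's sketch (hypothetical allocator, not part of the library): a stateful
// allocator that synchronizes internally can opt in, so adapters pick no_mutex.
// The specialization lives in user code:
//
//   struct internally_synchronized_allocator { /* stateful, locks internally */ };
//
//   namespace foonathan { namespace memory {
//       template <>
//       struct is_thread_safe_allocator<internally_synchronized_allocator> : std::true_type
//       {
//       };
//   }} // namespace foonathan::memory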
48
49 namespace detail
50 {
51 // selects a mutex for an Allocator
52 // stateless allocators don't need locking
53 template <class RawAllocator, class Mutex>
54 using mutex_for =
55 typename std::conditional<is_thread_safe_allocator<RawAllocator>::value, no_mutex,
56 Mutex>::type;
57
58 // storage for mutexes to use EBO
59 // it provides const lock/unlock functions; inherit from it
60 template <class Mutex>
61 class mutex_storage
62 {
63 public:
64 mutex_storage() noexcept = default;
65 mutex_storage(const mutex_storage&) noexcept {}
66
67 mutex_storage& operator=(const mutex_storage&) noexcept
68 {
69 return *this;
70 }
71
72 void lock() const
73 {
74 mutex_.lock();
75 }
76
77 void unlock() const noexcept
78 {
79 mutex_.unlock();
80 }
81
82 protected:
83 ~mutex_storage() noexcept = default;
84
85 private:
86 mutable Mutex mutex_;
87 };
88
89 template <>
90 class mutex_storage<no_mutex>
91 {
92 public:
93 mutex_storage() noexcept = default;
94
95 void lock() const noexcept {}
96 void unlock() const noexcept {}
97
98 protected:
99 ~mutex_storage() noexcept = default;
100 };
101
102 // non-modifiable pointer to an Allocator that keeps a lock
103 // I don't think EBO is necessary here...
104 template <class Alloc, class Mutex>
105 class locked_allocator
106 {
107 public:
108 locked_allocator(Alloc& alloc, Mutex& m) noexcept : mutex_(&m), alloc_(&alloc)
109 {
110 mutex_->lock();
111 }
112
113 locked_allocator(locked_allocator&& other) noexcept
114 : mutex_(other.mutex_), alloc_(other.alloc_)
115 {
116 other.mutex_ = nullptr;
117 other.alloc_ = nullptr;
118 }
119
120 ~locked_allocator() noexcept
121 {
122 if (mutex_)
123 mutex_->unlock();
124 }
125
126 locked_allocator& operator=(locked_allocator&& other) noexcept = delete;
127
128 Alloc& operator*() const noexcept
129 {
130 FOONATHAN_MEMORY_ASSERT(alloc_);
131 return *alloc_;
132 }
133
134 Alloc* operator->() const noexcept
135 {
136 FOONATHAN_MEMORY_ASSERT(alloc_);
137 return alloc_;
138 }
139
140 private:
141 Mutex* mutex_; // don't use unique_lock to avoid dependency
142 Alloc* alloc_;
143 };
144
145 template <class Alloc, class Mutex>
146 locked_allocator<Alloc, Mutex> lock_allocator(Alloc& a, Mutex& m)
147 {
148 return {a, m};
149 }
150 } // namespace detail
151 } // namespace memory
152 } // namespace foonathan
153
154 #endif // FOONATHAN_MEMORY_THREADING_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_TRACKING_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_TRACKING_HPP_INCLUDED
6
7 /// \file
8 /// Class \ref foonathan::memory::tracked_allocator and related classes and functions.
9
10 #include "detail/utility.hpp"
11 #include "allocator_traits.hpp"
12 #include "memory_arena.hpp"
13
14 namespace foonathan
15 {
16 namespace memory
17 {
18 namespace detail
19 {
20 template <class Allocator, class Tracker>
21 auto set_tracker(int, Allocator& allocator, Tracker* tracker) noexcept
22 -> decltype(allocator.get_allocator().set_tracker(tracker))
23 {
24 return allocator.get_allocator().set_tracker(tracker);
25 }
26 template <class Allocator, class Tracker>
27 void set_tracker(short, Allocator&, Tracker*) noexcept
28 {
29 }
30
31 // used with deeply_tracked_allocator
32 template <class Tracker, class BlockAllocator>
33 class deeply_tracked_block_allocator : FOONATHAN_EBO(BlockAllocator)
34 {
35 public:
36 template <typename... Args>
37 deeply_tracked_block_allocator(std::size_t block_size, Args&&... args)
38 : BlockAllocator(block_size, detail::forward<Args>(args)...), tracker_(nullptr)
39 {
40 }
41
42 memory_block allocate_block()
43 {
44 auto block = BlockAllocator::allocate_block();
45 if (tracker_) // on first call tracker_ is nullptr
46 tracker_->on_allocator_growth(block.memory, block.size);
47 return block;
48 }
49
50 void deallocate_block(memory_block block) noexcept
51 {
52 if (tracker_) // on last call tracker_ is nullptr again
53 tracker_->on_allocator_shrinking(block.memory, block.size);
54 BlockAllocator::deallocate_block(block);
55 }
56
57 std::size_t next_block_size() const noexcept
58 {
59 return BlockAllocator::next_block_size();
60 }
61
62 void set_tracker(Tracker* tracker) noexcept
63 {
64 tracker_ = tracker;
65 }
66
67 private:
68 Tracker* tracker_;
69 };
70 } // namespace detail
71
72 /// A \concept{concept_blockallocator,BlockAllocator} adapter that tracks another allocator using a \concept{concept_tracker,tracker}.
73 /// It wraps another \concept{concept_blockallocator,BlockAllocator} and calls the tracker function before forwarding to it.
74 /// The class can then be used anywhere a \concept{concept_blockallocator,BlockAllocator} is required and the memory usage will be tracked.<br>
75 /// It will only call the <tt>on_allocator_growth()</tt> and <tt>on_allocator_shrinking()</tt> tracking functions,
76 /// since a \concept{concept_blockallocator,BlockAllocator} is normally used inside higher allocators only.
77 /// \ingroup adapter
78 template <class Tracker, class BlockOrRawAllocator>
79 class tracked_block_allocator
80 : FOONATHAN_EBO(Tracker, make_block_allocator_t<BlockOrRawAllocator>)
81 {
82 public:
83 using allocator_type = make_block_allocator_t<BlockOrRawAllocator>;
84 using tracker = Tracker;
85
86 /// @{
87 /// \effects Creates it by giving it a \concept{concept_tracker,tracker} and the tracked \concept{concept_rawallocator,RawAllocator}.
88 /// It will embed both objects.
89 explicit tracked_block_allocator(tracker t = {}) noexcept : tracker(detail::move(t)) {}
90
91 tracked_block_allocator(tracker t, allocator_type&& alloc) noexcept
92 : tracker(detail::move(t)), allocator_type(detail::move(alloc))
93 {
94 }
95 /// @}
96
97 /// \effects Creates it in the form required by the concept.
98 /// The allocator will be constructed using \c block_size and \c args.
99 template <typename... Args>
100 tracked_block_allocator(std::size_t block_size, tracker t, Args&&... args)
101 : tracker(detail::move(t)), allocator_type(block_size, detail::forward<Args>(args)...)
102 {
103 }
104
105 /// \effects Calls <tt>Tracker::on_allocator_growth()</tt> after forwarding to the allocator.
106 /// \returns The block as returned by the allocator.
107 memory_block allocate_block()
108 {
109 auto block = allocator_type::allocate_block();
110 this->on_allocator_growth(block.memory, block.size);
111 return block;
112 }
113
114 /// \effects Calls <tt>Tracker::on_allocator_shrinking()</tt> and forwards to the allocator.
115 void deallocate_block(memory_block block) noexcept
116 {
117 this->on_allocator_shrinking(block.memory, block.size);
118 allocator_type::deallocate_block(block);
119 }
120
121 /// \returns The next block size as returned by the allocator.
122 std::size_t next_block_size() const noexcept
123 {
124 return allocator_type::next_block_size();
125 }
126
127 /// @{
128 /// \returns A (const) reference to the used allocator.
129 allocator_type& get_allocator() noexcept
130 {
131 return *this;
132 }
133
134 const allocator_type& get_allocator() const noexcept
135 {
136 return *this;
137 }
138 /// @}
139
140 /// @{
141 /// \returns A (const) reference to the tracker.
142 tracker& get_tracker() noexcept
143 {
144 return *this;
145 }
146
147 const tracker& get_tracker() const noexcept
148 {
149 return *this;
150 }
151 /// @}
152 };
153
154 /// Similar to \ref tracked_block_allocator, but shares the tracker with the higher level allocator.
155 /// This allows tracking both (de-)allocations and growth with one tracker.
156 /// \note For implementation reasons, it cannot track growth and shrinking in the constructor/destructor of the higher-level allocator.
157 /// \ingroup adapter
158 template <class Tracker, class BlockOrRawAllocator>
159 using deeply_tracked_block_allocator = FOONATHAN_IMPL_DEFINED(
160 detail::deeply_tracked_block_allocator<Tracker,
161 make_block_allocator_t<BlockOrRawAllocator>>);
162
163 /// A \concept{concept_rawallocator,RawAllocator} adapter that tracks another allocator using a \concept{concept_tracker,tracker}.
164 /// It wraps another \concept{concept_rawallocator,RawAllocator} and calls the tracker function before forwarding to it.
165 /// The class can then be used anywhere a \concept{concept_rawallocator,RawAllocator} is required and the memory usage will be tracked.<br>
166 /// If the \concept{concept_rawallocator,RawAllocator} uses \ref deeply_tracked_block_allocator as \concept{concept_blockallocator,BlockAllocator},
167 /// it will also track growth and shrinking of the allocator.
168 /// \ingroup adapter
169 template <class Tracker, class RawAllocator>
170 class tracked_allocator
171 : FOONATHAN_EBO(Tracker, allocator_traits<RawAllocator>::allocator_type)
172 {
173 using traits = allocator_traits<RawAllocator>;
174 using composable_traits = composable_allocator_traits<RawAllocator>;
175
176 public:
177 using allocator_type = typename allocator_traits<RawAllocator>::allocator_type;
178 using tracker = Tracker;
179
180 using is_stateful = std::integral_constant<bool, traits::is_stateful::value
181 || !std::is_empty<Tracker>::value>;
182
183 /// @{
184 /// \effects Creates it by giving it a \concept{concept_tracker,tracker} and the tracked \concept{concept_rawallocator,RawAllocator}.
185 /// It will embed both objects.
186 /// \note This will never call the <tt>Tracker::on_allocator_growth()</tt> function.
187 explicit tracked_allocator(tracker t = {}) noexcept
188 : tracked_allocator(detail::move(t), allocator_type{})
189 {
190 }
191
192 tracked_allocator(tracker t, allocator_type&& allocator) noexcept
193 : tracker(detail::move(t)), allocator_type(detail::move(allocator))
194 {
195 detail::set_tracker(0, get_allocator(), &get_tracker());
196 }
197 /// @}
198
199 /// \effects Destroys both tracker and allocator.
200 /// \note This will never call the <tt>Tracker::on_allocator_shrinking()</tt> function.
201 ~tracked_allocator() noexcept
202 {
203 detail::set_tracker(0, get_allocator(), static_cast<tracker*>(nullptr));
204 }
205
206 /// @{
207 /// \effects Moving moves both the tracker and the allocator.
208 tracked_allocator(tracked_allocator&& other) noexcept
209 : tracker(detail::move(other)), allocator_type(detail::move(other))
210 {
211 detail::set_tracker(0, get_allocator(), &get_tracker());
212 }
213
214 tracked_allocator& operator=(tracked_allocator&& other) noexcept
215 {
216 tracker::operator=(detail::move(other));
217 allocator_type::operator=(detail::move(other));
218 detail::set_tracker(0, get_allocator(), &get_tracker());
219 return *this;
220 }
221 /// @}
222
223 /// \effects Calls <tt>Tracker::on_node_allocation()</tt> and forwards to the allocator.
224 /// If a growth occurs and the allocator is deeply tracked, also calls <tt>Tracker::on_allocator_growth()</tt>.
225 /// \returns The result of <tt>allocate_node()</tt>
226 void* allocate_node(std::size_t size, std::size_t alignment)
227 {
228 auto mem = traits::allocate_node(get_allocator(), size, alignment);
229 this->on_node_allocation(mem, size, alignment);
230 return mem;
231 }
232
233 /// \effects Calls the composable node allocation function.
234 /// If allocation was successful, also calls `Tracker::on_node_allocation()`.
235 /// \returns The result of `try_allocate_node()`.
236 void* try_allocate_node(std::size_t size, std::size_t alignment) noexcept
237 {
238 auto mem = composable_traits::try_allocate_node(get_allocator(), size, alignment);
239 if (mem)
240 this->on_node_allocation(mem, size, alignment);
241 return mem;
242 }
243
244 /// \effects Calls <tt>Tracker::on_array_allocation()</tt> and forwards to the allocator.
245 /// If a growth occurs and the allocator is deeply tracked, also calls <tt>Tracker::on_allocator_growth()</tt>.
246 /// \returns The result of <tt>allocate_array()</tt>
247 void* allocate_array(std::size_t count, std::size_t size, std::size_t alignment)
248 {
249 auto mem = traits::allocate_array(get_allocator(), count, size, alignment);
250 this->on_array_allocation(mem, count, size, alignment);
251 return mem;
252 }
253
254 /// \effects Calls the composable array allocation function.
255 /// If allocation was successful, also calls `Tracker::on_array_allocation()`.
256 /// \returns The result of `try_allocate_array()`.
257 void* try_allocate_array(std::size_t count, std::size_t size,
258 std::size_t alignment) noexcept
259 {
260 auto mem =
261 composable_traits::try_allocate_array(get_allocator(), count, size, alignment);
262 if (mem)
263 this->on_array_allocation(mem, count, size, alignment);
264 return mem;
265 }
266
267 /// \effects Calls <tt>Tracker::on_node_deallocation()</tt> and forwards to the allocator's <tt>deallocate_node()</tt>.
268 /// If shrinking occurs and the allocator is deeply tracked, also calls <tt>Tracker::on_allocator_shrinking()</tt>.
269 void deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
270 {
271 this->on_node_deallocation(ptr, size, alignment);
272 traits::deallocate_node(get_allocator(), ptr, size, alignment);
273 }
274
275 /// \effects Calls the composable node deallocation function.
276 /// If it was successful, also calls `Tracker::on_node_deallocation()`.
277 /// \returns The result of `try_deallocate_node()`.
278 bool try_deallocate_node(void* ptr, std::size_t size, std::size_t alignment) noexcept
279 {
280 auto res =
281 composable_traits::try_deallocate_node(get_allocator(), ptr, size, alignment);
282 if (res)
283 this->on_node_deallocation(ptr, size, alignment);
284 return res;
285 }
286
287 /// \effects Calls <tt>Tracker::on_array_deallocation()</tt> and forwards to the allocator's <tt>deallocate_array()</tt>.
288 /// If shrinking occurs and the allocator is deeply tracked, also calls <tt>Tracker::on_allocator_shrinking()</tt>.
289 void deallocate_array(void* ptr, std::size_t count, std::size_t size,
290 std::size_t alignment) noexcept
291 {
292 this->on_array_deallocation(ptr, count, size, alignment);
293 traits::deallocate_array(get_allocator(), ptr, count, size, alignment);
294 }
295
296 /// \effects Calls the composable array deallocation function.
297 /// If it was successful, also calls `Tracker::on_array_deallocation()`.
298 /// \returns The result of `try_deallocate_array()`.
299 bool try_deallocate_array(void* ptr, std::size_t count, std::size_t size,
300 std::size_t alignment) noexcept
301 {
302 auto res = composable_traits::try_deallocate_array(get_allocator(), ptr, count, size, alignment);
303 if (res)
304 this->on_array_deallocation(ptr, count, size, alignment);
305 return res;
306 }
307
308 /// @{
309 /// \returns The result of the corresponding function on the wrapped allocator.
310 std::size_t max_node_size() const
311 {
312 return traits::max_node_size(get_allocator());
313 }
314
315 std::size_t max_array_size() const
316 {
317 return traits::max_array_size(get_allocator());
318 }
319
320 std::size_t max_alignment() const
321 {
322 return traits::max_alignment(get_allocator());
323 }
324 /// @}
325
326 /// @{
327 /// \returns A (\c const) reference to the wrapped allocator.
328 allocator_type& get_allocator() noexcept
329 {
330 return *this;
331 }
332
333 const allocator_type& get_allocator() const noexcept
334 {
335 return *this;
336 }
337 /// @}
338
339 /// @{
340 /// \returns A (\c const) reference to the tracker.
341 tracker& get_tracker() noexcept
342 {
343 return *this;
344 }
345
346 const tracker& get_tracker() const noexcept
347 {
348 return *this;
349 }
350 /// @}
351 };
352
353 /// \effects Takes a \concept{concept_rawallocator,RawAllocator} and wraps it with a \concept{concept_tracker,tracker}.
354 /// \returns A \ref tracked_allocator with the corresponding parameters forwarded to the constructor.
355 /// \relates tracked_allocator
356 template <class Tracker, class RawAllocator>
357 auto make_tracked_allocator(Tracker t, RawAllocator&& alloc)
358 -> tracked_allocator<Tracker, typename std::decay<RawAllocator>::type>
359 {
360 return tracked_allocator<Tracker, typename std::decay<RawAllocator>::type>{detail::move(
361 t),
362 detail::move(
363 alloc)};
364 }
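// Editor's example (a hedged sketch; counting_tracker is hypothetical and
// heap_allocator is assumed from heap_allocator.hpp): a minimal tracker only
// needs the (de)allocation hooks invoked above.
//
//   struct counting_tracker
//   {
//       std::size_t current = 0;
//       void on_node_allocation(void*, std::size_t size, std::size_t) noexcept { current += size; }
//       void on_node_deallocation(void*, std::size_t size, std::size_t) noexcept { current -= size; }
//       void on_array_allocation(void*, std::size_t n, std::size_t size, std::size_t) noexcept { current += n * size; }
//       void on_array_deallocation(void*, std::size_t n, std::size_t size, std::size_t) noexcept { current -= n * size; }
//   };
//
//   auto tracked = memory::make_tracked_allocator(counting_tracker{}, memory::heap_allocator{});
//   void* node = tracked.allocate_node(64, 8); // invokes on_node_allocation()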
365
366 namespace detail
367 {
368 template <typename T, bool Block>
369 struct is_block_or_raw_allocator_impl : std::true_type
370 {
371 };
372
373 template <typename T>
374 struct is_block_or_raw_allocator_impl<T, false> : memory::is_raw_allocator<T>
375 {
376 };
377
378 template <typename T>
379 struct is_block_or_raw_allocator
380 : is_block_or_raw_allocator_impl<T, memory::is_block_allocator<T>::value>
381 {
382 };
383
384 template <class RawAllocator, class BlockAllocator>
385 struct rebind_block_allocator;
386
387 template <template <typename...> class RawAllocator, typename... Args,
388 class OtherBlockAllocator>
389 struct rebind_block_allocator<RawAllocator<Args...>, OtherBlockAllocator>
390 {
391 using type =
392 RawAllocator<typename std::conditional<is_block_or_raw_allocator<Args>::value,
393 OtherBlockAllocator, Args>::type...>;
394 };
395
396 template <class Tracker, class RawAllocator>
397 using deeply_tracked_block_allocator_for =
398 memory::deeply_tracked_block_allocator<Tracker,
399 typename RawAllocator::allocator_type>;
400
401 template <class Tracker, class RawAllocator>
402 using rebound_allocator = typename rebind_block_allocator<
403 RawAllocator, deeply_tracked_block_allocator_for<Tracker, RawAllocator>>::type;
404 } // namespace detail
405
406 /// A \ref tracked_allocator that has rebound any \concept{concept_blockallocator,BlockAllocator} to the corresponding \ref deeply_tracked_block_allocator.
407 /// This makes it a deeply tracked allocator.<br>
408 /// It replaces each template argument of the given \concept{concept_rawallocator,RawAllocator} for which \ref is_block_allocator or \ref is_raw_allocator is \c true with a \ref deeply_tracked_block_allocator.
409 /// \ingroup adapter
410 template <class Tracker, class RawAllocator>
411 FOONATHAN_ALIAS_TEMPLATE(
412 deeply_tracked_allocator,
413 tracked_allocator<Tracker, detail::rebound_allocator<Tracker, RawAllocator>>);
414
415 /// \effects Takes a \concept{concept_rawallocator,RawAllocator} and deeply wraps it with a \concept{concept_tracker,tracker}.
416 /// \returns A \ref deeply_tracked_allocator with the corresponding parameters forwarded to the constructor.
417 /// \relates deeply_tracked_allocator
418 template <class RawAllocator, class Tracker, typename... Args>
419 auto make_deeply_tracked_allocator(Tracker t, Args&&... args)
420 -> deeply_tracked_allocator<Tracker, RawAllocator>
421 {
422 return deeply_tracked_allocator<Tracker, RawAllocator>(detail::move(t),
423 {detail::forward<Args>(
424 args)...});
425 }
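// Editor's sketch (assumptions: memory_pool from memory_pool.hpp, and a tracker
// like counting_tracker above extended with on_allocator_growth() and
// on_allocator_shrinking()); the pool's BlockAllocator is rebound so that growth
// of the pool itself reports to the same tracker:
//
//   auto pool = memory::make_deeply_tracked_allocator<memory::memory_pool<>>(
//       counting_tracker{}, 16u, 4096u); // node and block size forwarded to the pool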
426 } // namespace memory
427 } // namespace foonathan
428
429 #endif // FOONATHAN_MEMORY_TRACKING_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_VIRTUAL_MEMORY_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_VIRTUAL_MEMORY_HPP_INCLUDED
6
7 /// \file
8 /// Virtual memory api and (low-level) allocator classes.
9
10 #include <cstddef>
11 #include <type_traits>
12
13 #include "detail/debug_helpers.hpp"
14 #include "detail/utility.hpp"
15 #include "config.hpp"
16
17 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
18 #include "allocator_traits.hpp"
19 #endif
20
21 namespace foonathan
22 {
23 namespace memory
24 {
25 namespace detail
26 {
27 struct virtual_memory_allocator_leak_handler
28 {
29 void operator()(std::ptrdiff_t amount);
30 };
31
32 FOONATHAN_MEMORY_GLOBAL_LEAK_CHECKER(virtual_memory_allocator_leak_handler,
33 virtual_memory_allocator_leak_checker)
34 } // namespace detail
35
36 /// The page size of the virtual memory.
37 /// All virtual memory allocations must be a multiple of this size.
38 /// It is usually 4KiB.
39 /// \ingroup allocator
40 extern const std::size_t virtual_memory_page_size;
41
42 /// Reserves virtual memory.
43 /// \effects Reserves the given number of pages.
44 /// Each page is \ref virtual_memory_page_size big.
45 /// \returns The address of the first reserved page,
46 /// or \c nullptr in case of error.
47 /// \note The memory may not be used yet; it must first be committed.
48 /// \ingroup allocator
49 void* virtual_memory_reserve(std::size_t no_pages) noexcept;
50
51 /// Releases reserved virtual memory.
52 /// \effects Returns previously reserved pages to the system.
53 /// \requires \c pages must come from a previous call to \ref virtual_memory_reserve with the same \c no_pages,
54 /// it must not be \c nullptr.
55 /// \ingroup allocator
56 void virtual_memory_release(void* pages, std::size_t no_pages) noexcept;
57
58 /// Commits reserved virtual memory.
59 /// \effects Marks \c no_pages pages starting at the given address as available for use.
60 /// \returns The beginning of the committed area, i.e. \c memory, or \c nullptr in case of error.
61 /// \requires The memory must be previously reserved.
62 /// \ingroup allocator
63 void* virtual_memory_commit(void* memory, std::size_t no_pages) noexcept;
64
65 /// Decommits committed virtual memory.
66 /// \effects Puts committed memory back in the reserved state.
67 /// \requires \c memory must come from a previous call to \ref virtual_memory_commit with the same \c no_pages,
68 /// it must not be \c nullptr.
69 /// \ingroup allocator
70 void virtual_memory_decommit(void* memory, std::size_t no_pages) noexcept;
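// Putting the four functions together (a sketch; error handling kept minimal):
//
//   auto pages = virtual_memory_reserve(2);      // 2 * virtual_memory_page_size bytes
//   if (pages && virtual_memory_commit(pages, 2))
//   {
//       // ... use the committed memory ...
//       virtual_memory_decommit(pages, 2);       // back to the reserved state
//   }
//   if (pages)
//       virtual_memory_release(pages, 2);        // return the pages to the system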
71
72 /// A stateless \concept{concept_rawallocator,RawAllocator} that allocates memory using the virtual memory allocation functions.
73 /// It does not reserve memory in advance; every allocation reserves and commits in a single step.
74 /// \ingroup allocator
75 class virtual_memory_allocator
76 : FOONATHAN_EBO(detail::global_leak_checker<detail::virtual_memory_allocator_leak_handler>)
77 {
78 public:
79 using is_stateful = std::false_type;
80
81 virtual_memory_allocator() noexcept = default;
82 virtual_memory_allocator(virtual_memory_allocator&&) noexcept {}
83 ~virtual_memory_allocator() noexcept = default;
84
85 virtual_memory_allocator& operator=(virtual_memory_allocator&&) noexcept
86 {
87 return *this;
88 }
89
90 /// \effects A \concept{concept_rawallocator,RawAllocator} allocation function.
91 /// It uses \ref virtual_memory_reserve followed by \ref virtual_memory_commit for the allocation.
92 /// The number of pages allocated is the minimum needed to hold \c size contiguous bytes,
93 /// i.e. \c size is rounded up to the next multiple of the page size.
94 /// If debug fences are activated, one additional page before and after the memory will be allocated.
95 /// \returns A pointer to a \concept{concept_node,node}, it will never be \c nullptr.
96 /// It will always be aligned on a fence boundary, regardless of the alignment parameter.
97 /// \throws An exception of type \ref out_of_memory or whatever is thrown by its handler if the allocation fails.
98 void* allocate_node(std::size_t size, std::size_t alignment);
99
100 /// \effects A \concept{concept_rawallocator,RawAllocator} deallocation function.
101 /// It calls \ref virtual_memory_decommit followed by \ref virtual_memory_release for the deallocation.
102 void deallocate_node(void* node, std::size_t size, std::size_t alignment) noexcept;
103
104 /// \returns The maximum node size, which is the maximum value of \c std::size_t.
105 std::size_t max_node_size() const noexcept;
106
107 /// \returns The maximum alignment which is the same as the \ref virtual_memory_page_size.
108 std::size_t max_alignment() const noexcept;
109 };
110
111 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
112 extern template class allocator_traits<virtual_memory_allocator>;
113 #endif
114
115 struct memory_block;
116 struct allocator_info;
117
118 /// A \concept{concept_blockallocator,BlockAllocator} that reserves virtual memory and commits it part by part.
119 /// It is similar to \ref memory_stack but does not support growing and uses virtual memory;
120 /// it is also meant for big blocks, not small allocations.
121 /// \ingroup allocator
122 class virtual_block_allocator
123 {
124 public:
125 /// \effects Creates it giving it the block size and the total number of blocks it can allocate.
126 /// It reserves enough virtual memory for <tt>block_size * no_blocks</tt>.
127 /// \requires \c block_size must be non-zero and a multiple of the \ref virtual_memory_page_size.
128 /// \c no_blocks must be greater than \c 1.
129 /// \throws \ref out_of_memory if it cannot reserve the virtual memory.
130 explicit virtual_block_allocator(std::size_t block_size, std::size_t no_blocks);
131
132 /// \effects Releases the reserved virtual memory.
133 ~virtual_block_allocator() noexcept;
134
135 /// @{
136 /// \effects Moves the block allocator, it transfers ownership over the reserved area.
137 /// This does not invalidate any memory blocks.
138 virtual_block_allocator(virtual_block_allocator&& other) noexcept
139 : cur_(other.cur_), end_(other.end_), block_size_(other.block_size_)
140 {
141 other.cur_ = other.end_ = nullptr;
142 other.block_size_ = 0;
143 }
144
145 virtual_block_allocator& operator=(virtual_block_allocator&& other) noexcept
146 {
147 virtual_block_allocator tmp(detail::move(other));
148 swap(*this, tmp);
149 return *this;
150 }
151 /// @}
152
153 /// \effects Swaps the ownership over the reserved memory.
154 /// This does not invalidate any memory blocks.
155 friend void swap(virtual_block_allocator& a, virtual_block_allocator& b) noexcept
156 {
157 detail::adl_swap(a.cur_, b.cur_);
158 detail::adl_swap(a.end_, b.end_);
159 detail::adl_swap(a.block_size_, b.block_size_);
160 }
161
162 /// \effects Allocates a new memory block by committing the next \ref next_block_size() bytes.
163 /// \returns The \ref memory_block committed.
164 /// \throws \ref out_of_memory if it cannot commit the memory or the \ref capacity_left() is exhausted.
165 memory_block allocate_block();
166
167 /// \effects Deallocates the last allocated memory block by decommitting it.
168 /// This block will be returned again on the next call to \ref allocate_block().
169 /// \requires \c block must be the current top block of the memory,
170 /// this is guaranteed by \ref memory_arena.
171 void deallocate_block(memory_block block) noexcept;
172
173 /// \returns The next block size, this is the block size of the constructor.
174 std::size_t next_block_size() const noexcept
175 {
176 return block_size_;
177 }
178
179 /// \returns The number of blocks that can be committed until it runs out of memory.
180 std::size_t capacity_left() const noexcept
181 {
182 return static_cast<std::size_t>(end_ - cur_) / block_size_;
183 }
184
185 private:
186 allocator_info info() noexcept;
187
188 char * cur_, *end_;
189 std::size_t block_size_;
190 };
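// Usage sketch: wrapping it in a memory_arena guarantees the "top block"
// requirement of deallocate_block() (the arena constructor arguments shown
// here are an assumption: the block size first, then the allocator's no_blocks):
//
//   memory::memory_arena<memory::virtual_block_allocator> arena(
//       memory::virtual_memory_page_size, 64u); // 64 one-page blocks
//   auto block = arena.allocate_block();         // commits one block
//   arena.deallocate_block();                    // decommits it again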
191 } // namespace memory
192 } // namespace foonathan
193
194 #endif //FOONATHAN_MEMORY_VIRTUAL_MEMORY_HPP_INCLUDED
0 # Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 # This file is subject to the license terms in the LICENSE file
2 # found in the top-level directory of this distribution.
3
4 # builds actual library
5
6 set(header_path ${FOONATHAN_MEMORY_SOURCE_DIR}/include/foonathan/memory)
7 set(detail_header
8 ${header_path}/detail/align.hpp
9 ${header_path}/detail/assert.hpp
10 ${header_path}/detail/container_node_sizes.hpp
11 ${header_path}/detail/debug_helpers.hpp
12 ${header_path}/detail/ebo_storage.hpp
13 ${header_path}/detail/free_list.hpp
14 ${header_path}/detail/free_list_array.hpp
15 ${header_path}/detail/ilog2.hpp
16 ${header_path}/detail/lowlevel_allocator.hpp
17 ${header_path}/detail/memory_stack.hpp
18 ${header_path}/detail/small_free_list.hpp
19 ${header_path}/detail/utility.hpp)
20 set(header
21 ${header_path}/aligned_allocator.hpp
22 ${header_path}/allocator_storage.hpp
23 ${header_path}/allocator_traits.hpp
24 ${header_path}/config.hpp
25 ${header_path}/container.hpp
26 ${header_path}/debugging.hpp
27 ${header_path}/default_allocator.hpp
28 ${header_path}/deleter.hpp
29 ${header_path}/error.hpp
30 ${header_path}/fallback_allocator.hpp
31 ${header_path}/malloc_allocator.hpp
32 ${header_path}/heap_allocator.hpp
33 ${header_path}/iteration_allocator.hpp
34 ${header_path}/joint_allocator.hpp
35 ${header_path}/memory_arena.hpp
36 ${header_path}/memory_pool.hpp
37 ${header_path}/memory_pool_collection.hpp
38 ${header_path}/memory_pool_type.hpp
39 ${header_path}/memory_resource_adapter.hpp
40 ${header_path}/memory_stack.hpp
41 ${header_path}/namespace_alias.hpp
42 ${header_path}/new_allocator.hpp
43 ${header_path}/segregator.hpp
44 ${header_path}/smart_ptr.hpp
45 ${header_path}/static_allocator.hpp
46 ${header_path}/std_allocator.hpp
47 ${header_path}/temporary_allocator.hpp
48 ${header_path}/threading.hpp
49 ${header_path}/tracking.hpp
50 ${header_path}/virtual_memory.hpp
51 ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp)
52
53 set(src
54 detail/align.cpp
55 detail/debug_helpers.cpp
56 detail/assert.cpp
57 detail/free_list.cpp
58 detail/free_list_array.cpp
59 detail/free_list_utils.hpp
60 detail/small_free_list.cpp
61 debugging.cpp
62 error.cpp
63 heap_allocator.cpp
64 iteration_allocator.cpp
65 malloc_allocator.cpp
66 memory_arena.cpp
67 memory_pool.cpp
68 memory_pool_collection.cpp
69 memory_stack.cpp
70 new_allocator.cpp
71 static_allocator.cpp
72 temporary_allocator.cpp
73 virtual_memory.cpp)
74
75 # configure config file
76 configure_file("config.hpp.in" "${CMAKE_CURRENT_BINARY_DIR}/config_impl.hpp")
77
78 # generate container_node_sizes.hpp
79 # don't run it when cross-compiling unless CMAKE_CROSSCOMPILING_EMULATOR is defined
80 if(FOONATHAN_MEMORY_BUILD_TOOLS)
81 if(NOT CMAKE_CROSSCOMPILING)
82 add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp
83 COMMAND $<TARGET_FILE:foonathan_memory_node_size_debugger> --code ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp
84 DEPENDS foonathan_memory_node_size_debugger
85 VERBATIM)
86 elseif(DEFINED CMAKE_CROSSCOMPILING_EMULATOR)
87 add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp
88 COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:foonathan_memory_node_size_debugger> --code ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp
89 DEPENDS foonathan_memory_node_size_debugger
90 VERBATIM)
91 elseif(QNX OR QNXNTO)
92 if(EXISTS "${FOONATHAN_MEMORY_CONTAINER_NODE_SIZES_IMPL}")
93 message("-- Using the pre-generated file: ${FOONATHAN_MEMORY_CONTAINER_NODE_SIZES_IMPL}")
94 add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp
95 COMMAND cp ${FOONATHAN_MEMORY_CONTAINER_NODE_SIZES_IMPL} ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp )
96 else()
97 message(FATAL_ERROR "\nError: Cannot find pre-generated file container_node_sizes_impl.hpp\n"
98 "Please pre-generate the header file container_node_sizes_impl.hpp by following the steps below:\n"
99 "- Build nodesize_dbg from source:\n"
100 " ${PROJECT_SOURCE_DIR}/tool/node_size_debugger.cpp \n"
101 "- Transfer nodesize_dbg to QNX target and execute:\n"
102 " nodesize_dbg --code container_node_sizes_impl.hpp \n"
103 "- Transfer generated header file back to your development system \n"
104 "- Set FOONATHAN_MEMORY_CONTAINER_NODE_SIZES_IMPL to the path of the pre-generated file and pass it to cmake as an argument\n")
105 endif()
106 else()
107 message(WARNING "cross-compiling, but emulator is not defined, "
108 "cannot generate container_node_sizes_impl.hpp, node size information will be unavailable")
109 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp "#define FOONATHAN_MEMORY_NO_NODE_SIZE")
110 set(FOONATHAN_MEMORY_NO_NODE_SIZE 1 PARENT_SCOPE)
111 endif()
112 else()
113 message(WARNING "cannot generate container_node_sizes_impl.hpp, node size information will be unavailable")
114 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp "#define FOONATHAN_MEMORY_NO_NODE_SIZE")
115 set(FOONATHAN_MEMORY_NO_NODE_SIZE 1 PARENT_SCOPE)
116 endif()
117
118 add_library(foonathan_memory ${detail_header} ${header} ${src})
119 target_include_directories(foonathan_memory PUBLIC $<BUILD_INTERFACE:${FOONATHAN_MEMORY_SOURCE_DIR}/include/> # for client in subdirectory
120 $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}> # for generated files in build mode
121 $<INSTALL_INTERFACE:${FOONATHAN_MEMORY_INC_INSTALL_DIR}> # for client in install mode
122 PRIVATE ${header_path}) # for source files
123 target_compile_definitions(foonathan_memory PUBLIC
124 FOONATHAN_MEMORY=1
125 FOONATHAN_MEMORY_VERSION_MAJOR=${FOONATHAN_MEMORY_VERSION_MAJOR}
126 FOONATHAN_MEMORY_VERSION_MINOR=${FOONATHAN_MEMORY_VERSION_MINOR}
127 FOONATHAN_MEMORY_VERSION_PATCH=${FOONATHAN_MEMORY_VERSION_PATCH})
128 if(NOT MSVC)
129 target_compile_features(foonathan_memory PUBLIC cxx_constexpr)
130 endif()
131
132 set_target_properties(foonathan_memory PROPERTIES
133 OUTPUT_NAME "foonathan_memory-${FOONATHAN_MEMORY_VERSION}"
134 POSITION_INDEPENDENT_CODE ON)
135
136 install(TARGETS foonathan_memory EXPORT foonathan_memoryTargets
137 RUNTIME DESTINATION ${FOONATHAN_MEMORY_RUNTIME_INSTALL_DIR}
138 LIBRARY DESTINATION ${FOONATHAN_MEMORY_LIBRARY_INSTALL_DIR}
139 ARCHIVE DESTINATION ${FOONATHAN_MEMORY_ARCHIVE_INSTALL_DIR}
140 FRAMEWORK DESTINATION ${FOONATHAN_MEMORY_FRAMEWORK_INSTALL_DIR})
141
142 # Write/install version file
143 include(CMakePackageConfigHelpers)
144 set(version_file "${CMAKE_CURRENT_BINARY_DIR}/cmake/foonathan_memory-config-version.cmake")
145 write_basic_package_version_file(${version_file}
146 VERSION ${FOONATHAN_MEMORY_VERSION}
147 COMPATIBILITY AnyNewerVersion)
148
149 install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config_impl.hpp DESTINATION ${FOONATHAN_MEMORY_INC_INSTALL_DIR})
150 install(FILES ${CMAKE_CURRENT_BINARY_DIR}/container_node_sizes_impl.hpp DESTINATION ${FOONATHAN_MEMORY_INC_INSTALL_DIR}/foonathan/memory/detail)
151 install(FILES ${header} DESTINATION ${FOONATHAN_MEMORY_INC_INSTALL_DIR}/foonathan/memory)
152 install(FILES ${detail_header} DESTINATION ${FOONATHAN_MEMORY_INC_INSTALL_DIR}/foonathan/memory/detail)
153 install(FILES ${version_file} DESTINATION ${FOONATHAN_MEMORY_CMAKE_CONFIG_INSTALL_DIR})
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_IMPL_IN_CONFIG_HPP
5 #error "do not include this file directly, use config.hpp"
6 #endif
7
8 #include <cstddef>
9
10 //=== options ===//
11 // clang-format off
12 #cmakedefine01 FOONATHAN_MEMORY_CHECK_ALLOCATION_SIZE
13 #define FOONATHAN_MEMORY_IMPL_DEFAULT_ALLOCATOR ${FOONATHAN_MEMORY_DEFAULT_ALLOCATOR}
14 #cmakedefine01 FOONATHAN_MEMORY_DEBUG_ASSERT
15 #cmakedefine01 FOONATHAN_MEMORY_DEBUG_FILL
16 #define FOONATHAN_MEMORY_DEBUG_FENCE ${FOONATHAN_MEMORY_DEBUG_FENCE}
17 #cmakedefine01 FOONATHAN_MEMORY_DEBUG_LEAK_CHECK
18 #cmakedefine01 FOONATHAN_MEMORY_DEBUG_POINTER_CHECK
19 #cmakedefine01 FOONATHAN_MEMORY_DEBUG_DOUBLE_DEALLOC_CHECK
20 #cmakedefine01 FOONATHAN_MEMORY_EXTERN_TEMPLATE
21 #define FOONATHAN_MEMORY_TEMPORARY_STACK_MODE ${FOONATHAN_MEMORY_TEMPORARY_STACK_MODE}
22 // clang-format on
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "debugging.hpp"
5
6 #if FOONATHAN_HOSTED_IMPLEMENTATION
7 #include <cstdio>
8 #endif
9
10 #include <atomic>
11 #include <cstdlib>
12
13 #include "error.hpp"
14
15 using namespace foonathan::memory;
16
17 namespace
18 {
19 void default_leak_handler(const allocator_info& info, std::ptrdiff_t amount) noexcept
20 {
21 #if FOONATHAN_HOSTED_IMPLEMENTATION
22 if (amount > 0)
23 std::fprintf(stderr, "[%s] Allocator %s (at %p) leaked %zu bytes.\n",
24 FOONATHAN_MEMORY_LOG_PREFIX, info.name, info.allocator,
25 std::size_t(amount));
26 else
27 std::fprintf(stderr,
28 "[%s] Allocator %s (at %p) has deallocated %zu bytes more than "
29 "ever allocated "
30 "(it's amazing you're able to see this message!).\n",
31 FOONATHAN_MEMORY_LOG_PREFIX, info.name, info.allocator,
32 std::size_t(-amount));
33 #else
34 (void)info;
35 (void)amount;
36 #endif
37 }
38
39 std::atomic<leak_handler> leak_h(default_leak_handler);
40 } // namespace
41
42 leak_handler foonathan::memory::set_leak_handler(leak_handler h)
43 {
44 return leak_h.exchange(h ? h : default_leak_handler);
45 }
46
47 leak_handler foonathan::memory::get_leak_handler()
48 {
49 return leak_h;
50 }
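// Sketch: installing a custom handler (my_leak_logger is hypothetical; any
// function matching the leak_handler signature works, and the previous
// handler is returned so it can be restored later):
//
//   void my_leak_logger(const allocator_info& info, std::ptrdiff_t amount) noexcept;
//   auto previous = memory::set_leak_handler(&my_leak_logger);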
51
52 namespace
53 {
54 void default_invalid_ptr_handler(const allocator_info& info, const void* ptr) noexcept
55 {
56 #if FOONATHAN_HOSTED_IMPLEMENTATION
57 std::fprintf(stderr,
58 "[%s] Deallocation function of allocator %s (at %p) received invalid "
59 "pointer %p\n",
60 FOONATHAN_MEMORY_LOG_PREFIX, info.name, info.allocator, ptr);
61 #endif
62 (void)info;
63 (void)ptr;
64 std::abort();
65 }
66
67 std::atomic<invalid_pointer_handler> invalid_ptr_h(default_invalid_ptr_handler);
68 } // namespace
69
70 invalid_pointer_handler foonathan::memory::set_invalid_pointer_handler(invalid_pointer_handler h)
71 {
72 return invalid_ptr_h.exchange(h ? h : default_invalid_ptr_handler);
73 }
74
75 invalid_pointer_handler foonathan::memory::get_invalid_pointer_handler()
76 {
77 return invalid_ptr_h;
78 }
79
80 namespace
81 {
82 void default_buffer_overflow_handler(const void* memory, std::size_t node_size,
83 const void* ptr) noexcept
84 {
85 #if FOONATHAN_HOSTED_IMPLEMENTATION
86 std::fprintf(stderr,
87 "[%s] Buffer overflow at address %p detected, corresponding memory "
88 "block %p has only size %zu.",
89 FOONATHAN_MEMORY_LOG_PREFIX, ptr, memory, node_size);
90 #endif
91 (void)memory;
92 (void)node_size;
93 (void)ptr;
94 std::abort();
95 }
96
97 std::atomic<buffer_overflow_handler> buffer_overflow_h(default_buffer_overflow_handler);
98 } // namespace
99
100 buffer_overflow_handler foonathan::memory::set_buffer_overflow_handler(buffer_overflow_handler h)
101 {
102 return buffer_overflow_h.exchange(h ? h : default_buffer_overflow_handler);
103 }
104
105 buffer_overflow_handler foonathan::memory::get_buffer_overflow_handler()
106 {
107 return buffer_overflow_h;
108 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/align.hpp"
5
6 #include "detail/ilog2.hpp"
7
8 using namespace foonathan::memory;
9 using namespace detail;
10
11 bool foonathan::memory::detail::is_aligned(void* ptr, std::size_t alignment) noexcept
12 {
13 FOONATHAN_MEMORY_ASSERT(is_valid_alignment(alignment));
14 auto address = reinterpret_cast<std::uintptr_t>(ptr);
15 return address % alignment == 0u;
16 }
17
18 std::size_t foonathan::memory::detail::alignment_for(std::size_t size) noexcept
19 {
20 return size >= max_alignment ? max_alignment : (std::size_t(1) << ilog2(size));
21 }
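// For illustration (assuming max_alignment == 16, as on typical x86_64):
//   alignment_for(1)  == 1   // 1 << ilog2(1)
//   alignment_for(6)  == 4   // largest power of two <= 6
//   alignment_for(24) == 16  // capped at max_alignment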
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/assert.hpp"
5
6 #if FOONATHAN_HOSTED_IMPLEMENTATION
7 #include <cstdio>
8 #endif
9
10 #include <cstdlib>
11
12 #include "error.hpp"
13
14 using namespace foonathan::memory;
15 using namespace detail;
16
17 void detail::handle_failed_assert(const char* msg, const char* file, int line,
18 const char* fnc) noexcept
19 {
20 #if FOONATHAN_HOSTED_IMPLEMENTATION
21 std::fprintf(stderr, "[%s] Assertion failure in function %s (%s:%d): %s.\n",
22 FOONATHAN_MEMORY_LOG_PREFIX, fnc, file, line, msg);
23 #endif
24 std::abort();
25 }
26
27 void detail::handle_warning(const char* msg, const char* file, int line,
28 const char* fnc) noexcept
29 {
30 #if FOONATHAN_HOSTED_IMPLEMENTATION
31 std::fprintf(stderr, "[%s] Warning triggered in function %s (%s:%d): %s.\n",
32 FOONATHAN_MEMORY_LOG_PREFIX, fnc, file, line, msg);
33 #endif
34 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/debug_helpers.hpp"
5
6 #if FOONATHAN_HOSTED_IMPLEMENTATION
7 #include <cstring>
8 #endif
9
10 #include "debugging.hpp"
11
12 using namespace foonathan::memory;
13 using namespace detail;
14
15 #if FOONATHAN_MEMORY_DEBUG_FILL
16 void detail::debug_fill(void *memory, std::size_t size, debug_magic m) noexcept
17 {
18 #if FOONATHAN_HOSTED_IMPLEMENTATION
19 std::memset(memory, static_cast<int>(m), size);
20 #else
21 // do the naive loop :(
22 auto ptr = static_cast<unsigned char*>(memory);
23 for (std::size_t i = 0u; i != size; ++i)
24 *ptr++ = static_cast<unsigned char>(m);
25 #endif
26 }
27
28 void* detail::debug_is_filled(void *memory, std::size_t size, debug_magic m) noexcept
29 {
30 auto byte = static_cast<unsigned char*>(memory);
31 for (auto end = byte + size; byte != end; ++byte)
32 if (*byte != static_cast<unsigned char>(m))
33 return byte;
34 return nullptr;
35 }
36
37 void* detail::debug_fill_new(void *memory,
38 std::size_t node_size, std::size_t fence_size) noexcept
39 {
40 if (!debug_fence_size)
41 fence_size = 0u; // force override of fence_size
42
43 auto mem = static_cast<char*>(memory);
44 debug_fill(mem, fence_size, debug_magic::fence_memory);
45
46 mem += fence_size;
47 debug_fill(mem, node_size, debug_magic::new_memory);
48
49 debug_fill(mem + node_size, fence_size, debug_magic::fence_memory);
50
51 return mem;
52 }
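// Resulting layout (when debug fences are enabled):
//
//   [ fence_size bytes ][ node_size bytes ][ fence_size bytes ]
//     fence_memory        new_memory         fence_memory
//                       ^ returned pointer
//
// debug_fill_free() below expects exactly this layout and returns the start
// of the leading fence again.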
53
54 void* detail::debug_fill_free(void *memory,
55 std::size_t node_size, std::size_t fence_size) noexcept
56 {
57 if (!debug_fence_size)
58 fence_size = 0u; // force override of fence_size
59
60 debug_fill(memory, node_size, debug_magic::freed_memory);
61
62 auto pre_fence = static_cast<unsigned char*>(memory) - fence_size;
63 if (auto pre_dirty = debug_is_filled(pre_fence, fence_size, debug_magic::fence_memory))
64 get_buffer_overflow_handler()(memory, node_size, pre_dirty);
65
66 auto post_mem = static_cast<unsigned char*>(memory) + node_size;
67 if (auto post_dirty = debug_is_filled(post_mem, fence_size, debug_magic::fence_memory))
68 get_buffer_overflow_handler()(memory, node_size, post_dirty);
69
70 return pre_fence;
71 }
72
73 void detail::debug_fill_internal(void *memory, std::size_t size, bool free) noexcept
74 {
75 debug_fill(memory, size, free ? debug_magic::internal_freed_memory : debug_magic::internal_memory);
76 }
77 #endif
78
79 void detail::debug_handle_invalid_ptr(const allocator_info &info, void *ptr)
80 {
81 get_invalid_pointer_handler()(info, ptr);
82 }
83
84 void detail::debug_handle_memory_leak(const allocator_info &info, std::ptrdiff_t amount)
85 {
86 get_leak_handler()(info, amount);
87 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/free_list.hpp"
5
6 #include "detail/align.hpp"
7 #include "detail/debug_helpers.hpp"
8 #include "detail/assert.hpp"
9 #include "debugging.hpp"
10 #include "error.hpp"
11
12 #include "free_list_utils.hpp"
13
14 using namespace foonathan::memory;
15 using namespace detail;
16
17 namespace
18 {
19 // i.e. array
20 struct interval
21 {
22 char* prev; // last before
23 char* first; // first in
24 char* last; // last in
25 char* next; // first after
26
27 // number of nodes in the interval
28 std::size_t size(std::size_t node_size) const noexcept
29 {
30 // last is inclusive, so add actual_size to it
31 // note: cannot use next, might not be directly after
32 auto end = last + node_size;
33 FOONATHAN_MEMORY_ASSERT((end - first) % node_size == 0u);
34 return (end - first) / node_size;
35 }
36 };
37
38 // searches for bytes_needed consecutive bytes
39 // first is the head of the unordered free list
40 // assumes list is not empty
41 // xor_list_search_array() below is the analogue for the ordered list
42 interval list_search_array(char* first, std::size_t bytes_needed,
43 std::size_t node_size) noexcept
44 {
45 interval i;
46 i.prev = nullptr;
47 i.first = first;
48 // i.last/next are used as iterator for the end of the interval
49 i.last = first;
50 i.next = list_get_next(first);
51
52 auto bytes_so_far = node_size;
53 while (i.next)
54 {
55 if (i.last + node_size != i.next) // not contiguous
56 {
57 // restart at next
58 i.prev = i.last;
59 i.first = i.next;
60 i.last = i.next;
61 i.next = list_get_next(i.last);
62
63 bytes_so_far = node_size;
64 }
65 else
66 {
67 // extend interval
68 auto new_next = list_get_next(i.next);
69 i.last = i.next;
70 i.next = new_next;
71
72 bytes_so_far += node_size;
73 if (bytes_so_far >= bytes_needed)
74 return i;
75 }
76 }
77 // not enough contiguous space
78 return {nullptr, nullptr, nullptr, nullptr};
79 }
80
81 // similar to list_search_array()
82 // begin/end are proxy nodes
83 interval xor_list_search_array(char* begin, char* end, std::size_t bytes_needed,
84 std::size_t node_size) noexcept
85 {
86 interval i;
87 i.prev = begin;
88 i.first = xor_list_get_other(begin, nullptr);
89 // i.last/next are used as iterator for the end of the interval
90 i.last = i.first;
91 i.next = xor_list_get_other(i.last, i.prev);
92
93 auto bytes_so_far = node_size;
94 while (i.next != end)
95 {
96 if (i.last + node_size != i.next) // not contiguous
97 {
98 // restart at i.next
99 i.prev = i.last;
100 i.first = i.next;
101 i.last = i.next;
102 i.next = xor_list_get_other(i.first, i.prev);
103
104 bytes_so_far = node_size;
105 }
106 else
107 {
108 // extend interval
109 auto new_next = xor_list_get_other(i.next, i.last);
110 i.last = i.next;
111 i.next = new_next;
112
113 bytes_so_far += node_size;
114 if (bytes_so_far >= bytes_needed)
115 return i;
116 }
117 }
118 // not enough contiguous space
119 return {nullptr, nullptr, nullptr, nullptr};
120 }
121 } // namespace
122
123 constexpr std::size_t free_memory_list::min_element_size;
124 constexpr std::size_t free_memory_list::min_element_alignment;
125
126 free_memory_list::free_memory_list(std::size_t node_size) noexcept
127 : first_(nullptr),
128 node_size_(node_size > min_element_size ? node_size : min_element_size),
129 capacity_(0u)
130 {
131 }
132
133 free_memory_list::free_memory_list(std::size_t node_size, void* mem,
134 std::size_t size) noexcept
135 : free_memory_list(node_size)
136 {
137 insert(mem, size);
138 }
139
140 free_memory_list::free_memory_list(free_memory_list&& other) noexcept
141 : first_(other.first_),
142 node_size_(other.node_size_),
143 capacity_(other.capacity_)
144 {
145 other.first_ = nullptr;
146 other.capacity_ = 0u;
147 }
148
149 free_memory_list& free_memory_list::operator=(free_memory_list&& other) noexcept
150 {
151 free_memory_list tmp(detail::move(other));
152 swap(*this, tmp);
153 return *this;
154 }
155
156 void foonathan::memory::detail::swap(free_memory_list& a, free_memory_list& b) noexcept
157 {
158 detail::adl_swap(a.first_, b.first_);
159 detail::adl_swap(a.node_size_, b.node_size_);
160 detail::adl_swap(a.capacity_, b.capacity_);
161 }
162
163 void free_memory_list::insert(void* mem, std::size_t size) noexcept
164 {
165 FOONATHAN_MEMORY_ASSERT(mem);
166 FOONATHAN_MEMORY_ASSERT(is_aligned(mem, alignment()));
167 detail::debug_fill_internal(mem, size, false);
168
169 insert_impl(mem, size);
170 }
171
172 void* free_memory_list::allocate() noexcept
173 {
174 FOONATHAN_MEMORY_ASSERT(!empty());
175 --capacity_;
176
177 auto mem = first_;
178 first_ = list_get_next(first_);
179 return debug_fill_new(mem, node_size_, fence_size());
180 }
181
182 void* free_memory_list::allocate(std::size_t n) noexcept
183 {
184 FOONATHAN_MEMORY_ASSERT(!empty());
185 if (n <= node_size_)
186 return allocate();
187
188 auto actual_size = node_size_ + 2 * fence_size();
189
190 auto i = list_search_array(first_, n + 2 * fence_size(), actual_size);
191 if (i.first == nullptr)
192 return nullptr;
193
194 if (i.prev)
195 list_set_next(i.prev, i.next); // change next from previous to first after
196 else
197 first_ = i.next;
198 capacity_ -= i.size(actual_size);
199
200 return debug_fill_new(i.first, n, fence_size());
201 }
202
203 void free_memory_list::deallocate(void* ptr) noexcept
204 {
205 ++capacity_;
206
207 auto node = static_cast<char*>(debug_fill_free(ptr, node_size_, fence_size()));
208 list_set_next(node, first_);
209 first_ = node;
210 }
211
212 void free_memory_list::deallocate(void* ptr, std::size_t n) noexcept
213 {
214 if (n <= node_size_)
215 deallocate(ptr);
216 else
217 {
218 auto mem = debug_fill_free(ptr, n, fence_size());
219 insert_impl(mem, n + 2 * fence_size());
220 }
221 }
222
223 std::size_t free_memory_list::alignment() const noexcept
224 {
225 return alignment_for(node_size_);
226 }
227
228 std::size_t free_memory_list::fence_size() const noexcept
229 {
230 // fence size is max alignment
231 return debug_fence_size ? max_alignment : 0u;
232 }
233
234 void free_memory_list::insert_impl(void* mem, std::size_t size) noexcept
235 {
236 auto actual_size = node_size_ + 2 * fence_size();
237 auto no_nodes = size / actual_size;
238 FOONATHAN_MEMORY_ASSERT(no_nodes > 0);
239
240 auto cur = static_cast<char*>(mem);
241 for (std::size_t i = 0u; i != no_nodes - 1; ++i)
242 {
243 list_set_next(cur, cur + actual_size);
244 cur += actual_size;
245 }
246 list_set_next(cur, first_);
247 first_ = static_cast<char*>(mem);
248
249 capacity_ += no_nodes;
250 }
251
252 namespace
253 {
254 // converts a block into a linked list
255 void xor_link_block(void* memory, std::size_t node_size, std::size_t no_nodes, char* prev,
256 char* next) noexcept
257 {
258 auto cur = static_cast<char*>(memory);
259 xor_list_change(prev, next, cur); // change next pointer of prev
260
261 auto last_cur = prev;
262 for (std::size_t i = 0u; i != no_nodes - 1; ++i)
263 {
264 xor_list_set(cur, last_cur,
265 cur + node_size); // cur gets last_cur and the next node in contiguous memory
266 last_cur = cur;
267 cur += node_size;
268 }
269 xor_list_set(cur, last_cur, next); // last memory node gets next as next
270 xor_list_change(next, prev, cur); // change prev pointer of next
271 }
272
273 struct pos
274 {
275 char *prev, *next;
276 };
277
278 // finds position to insert memory to keep list ordered
279 // first_prev -> first -> ... (memory somewhere here) ... -> last -> last_next
280 pos find_pos_interval(const allocator_info& info, char* memory, char* first_prev, char* first,
281 char* last, char* last_next) noexcept
282 {
283 // note: first_prev/last_next can be the proxy nodes, then first_prev isn't necessarily less than first!
284 FOONATHAN_MEMORY_ASSERT(less(first, memory) && less(memory, last));
285
286 // need to insert somewhere in the middle
287 // search through the entire list
288 // search from both ends at once
289 auto cur_forward = first;
290 auto prev_forward = first_prev;
291
292 auto cur_backward = last;
293 auto prev_backward = last_next;
294
295 do
296 {
297 if (greater(cur_forward, memory))
298 return {prev_forward, cur_forward};
299 else if (less(cur_backward, memory))
300 // the next position is the previous backwards pointer
301 return {cur_backward, prev_backward};
302 debug_check_double_dealloc(
303 [&] { return cur_forward != memory && cur_backward != memory; }, info, memory);
304 xor_list_iter_next(cur_forward, prev_forward);
305 xor_list_iter_next(cur_backward, prev_backward);
306 } while (less(prev_forward, prev_backward));
307
308 // ran outside of list
309 debug_check_double_dealloc([] { return false; }, info, memory);
310 return {nullptr, nullptr};
311 }
312
313 // finds the position in the entire list
314 pos find_pos(const allocator_info& info, char* memory, char* begin_node, char* end_node,
315 char* last_dealloc, char* last_dealloc_prev) noexcept
316 {
317 auto first = xor_list_get_other(begin_node, nullptr);
318 auto last = xor_list_get_other(end_node, nullptr);
319
320 if (greater(first, memory))
321 // insert at front
322 return {begin_node, first};
323 else if (less(last, memory))
324 // insert at the end
325 return {last, end_node};
326 else if (less(last_dealloc_prev, memory) && less(memory, last_dealloc))
327 // insert before last_dealloc
328 return {last_dealloc_prev, last_dealloc};
329 else if (less(memory, last_dealloc))
330 // insert into [first, last_dealloc_prev]
331 return find_pos_interval(info, memory, begin_node, first, last_dealloc_prev,
332 last_dealloc);
333 else if (greater(memory, last_dealloc))
334 // insert into (last_dealloc, last]
335 return find_pos_interval(info, memory, last_dealloc_prev, last_dealloc, last, end_node);
336
337 FOONATHAN_MEMORY_UNREACHABLE("memory must be in some half or outside");
338 return {nullptr, nullptr};
339 }
340 } // namespace
341
342 constexpr std::size_t ordered_free_memory_list::min_element_size;
343 constexpr std::size_t ordered_free_memory_list::min_element_alignment;
344
345 ordered_free_memory_list::ordered_free_memory_list(std::size_t node_size) noexcept
346 : node_size_(node_size > min_element_size ? node_size : min_element_size),
347 capacity_(0u),
348 last_dealloc_(end_node()),
349 last_dealloc_prev_(begin_node())
350 {
351 xor_list_set(begin_node(), nullptr, end_node());
352 xor_list_set(end_node(), begin_node(), nullptr);
353 }
354
355 ordered_free_memory_list::ordered_free_memory_list(ordered_free_memory_list&& other) noexcept
356 : node_size_(other.node_size_),
357 capacity_(other.capacity_)
358 {
359 if (!other.empty())
360 {
361 auto first = xor_list_get_other(other.begin_node(), nullptr);
362 auto last = xor_list_get_other(other.end_node(), nullptr);
363
364 xor_list_set(begin_node(), nullptr, first);
365 xor_list_change(first, other.begin_node(), begin_node());
366 xor_list_change(last, other.end_node(), end_node());
367 xor_list_set(end_node(), last, nullptr);
368
369 other.capacity_ = 0u;
370 xor_list_set(other.begin_node(), nullptr, other.end_node());
371 xor_list_set(other.end_node(), other.begin_node(), nullptr);
372 }
373 else
374 {
375 xor_list_set(begin_node(), nullptr, end_node());
376 xor_list_set(end_node(), begin_node(), nullptr);
377 }
378
379 // for programming convenience, last_dealloc is reset
380 last_dealloc_prev_ = begin_node();
381 last_dealloc_ = xor_list_get_other(last_dealloc_prev_, nullptr);
382 }
383
384 void foonathan::memory::detail::swap(ordered_free_memory_list& a,
385 ordered_free_memory_list& b) noexcept
386 {
387 auto a_first = xor_list_get_other(a.begin_node(), nullptr);
388 auto a_last = xor_list_get_other(a.end_node(), nullptr);
389
390 auto b_first = xor_list_get_other(b.begin_node(), nullptr);
391 auto b_last = xor_list_get_other(b.end_node(), nullptr);
392
393 if (!a.empty())
394 {
395 xor_list_set(b.begin_node(), nullptr, a_first);
396 xor_list_change(a_first, a.begin_node(), b.begin_node());
397 xor_list_change(a_last, a.end_node(), b.end_node());
398 xor_list_set(b.end_node(), a_last, nullptr);
399 }
400 else
401 {
402 xor_list_set(b.begin_node(), nullptr, b.end_node());
403 xor_list_set(b.end_node(), b.begin_node(), nullptr);
404 }
405
406 if (!b.empty())
407 {
408 xor_list_set(a.begin_node(), nullptr, b_first);
409 xor_list_change(b_first, b.begin_node(), a.begin_node());
410 xor_list_change(b_last, b.end_node(), a.end_node());
411 xor_list_set(a.end_node(), b_last, nullptr);
412 }
413 else
414 {
415 xor_list_set(a.begin_node(), nullptr, a.end_node());
416 xor_list_set(a.end_node(), a.begin_node(), nullptr);
417 }
418
419 detail::adl_swap(a.node_size_, b.node_size_);
420 detail::adl_swap(a.capacity_, b.capacity_);
421
422 // for programming convenience, last_dealloc is reset
423 a.last_dealloc_prev_ = a.begin_node();
424 a.last_dealloc_ = xor_list_get_other(a.last_dealloc_prev_, nullptr);
425
426 b.last_dealloc_prev_ = b.begin_node();
427 b.last_dealloc_ = xor_list_get_other(b.last_dealloc_prev_, nullptr);
428 }
429
430 void ordered_free_memory_list::insert(void* mem, std::size_t size) noexcept
431 {
432 FOONATHAN_MEMORY_ASSERT(mem);
433 FOONATHAN_MEMORY_ASSERT(is_aligned(mem, alignment()));
434 debug_fill_internal(mem, size, false);
435
436 insert_impl(mem, size);
437 }
438
439 void* ordered_free_memory_list::allocate() noexcept
440 {
441 FOONATHAN_MEMORY_ASSERT(!empty());
442
443 // remove first node
444 auto prev = begin_node();
445 auto node = xor_list_get_other(prev, nullptr);
446 auto next = xor_list_get_other(node, prev);
447
448 xor_list_set(prev, nullptr, next); // link prev to next
449 xor_list_change(next, node, prev); // change prev of next
450 --capacity_;
451
452 if (node == last_dealloc_)
453 {
454 // move last_dealloc_ one further in
455 last_dealloc_ = next;
456 FOONATHAN_MEMORY_ASSERT(last_dealloc_prev_ == prev);
457 }
458
459 return debug_fill_new(node, node_size_, fence_size());
460 }
461
462 void* ordered_free_memory_list::allocate(std::size_t n) noexcept
463 {
464 FOONATHAN_MEMORY_ASSERT(!empty());
465
466 if (n <= node_size_)
467 return allocate();
468
469 auto actual_size = node_size_ + 2 * fence_size();
470
471 auto i = xor_list_search_array(begin_node(), end_node(), n + 2 * fence_size(), actual_size);
472 if (i.first == nullptr)
473 return nullptr;
474
475 xor_list_change(i.prev, i.first, i.next); // change next pointer from i.prev to i.next
476 xor_list_change(i.next, i.last, i.prev); // change prev pointer from i.next to i.prev
477 capacity_ -= i.size(actual_size);
478
479 // if last_dealloc_ points into the array being removed
480 if (less_equal(i.first, last_dealloc_) && less_equal(last_dealloc_, i.last))
481 {
482 // move last_dealloc just outside range
483 last_dealloc_ = i.next;
484 last_dealloc_prev_ = i.prev;
485 }
486
487 return debug_fill_new(i.first, n, fence_size());
488 }
489
490 void ordered_free_memory_list::deallocate(void* ptr) noexcept
491 {
492 auto node = static_cast<char*>(debug_fill_free(ptr, node_size_, fence_size()));
493
494 auto p =
495 find_pos(allocator_info(FOONATHAN_MEMORY_LOG_PREFIX "::detail::ordered_free_memory_list",
496 this),
497 node, begin_node(), end_node(), last_dealloc_, last_dealloc_prev_);
498
499 xor_list_insert(node, p.prev, p.next);
500 ++capacity_;
501
502 last_dealloc_ = node;
503 last_dealloc_prev_ = p.prev;
504 }
505
506 void ordered_free_memory_list::deallocate(void* ptr, std::size_t n) noexcept
507 {
508 if (n <= node_size_)
509 deallocate(ptr);
510 else
511 {
512 auto mem = debug_fill_free(ptr, n, fence_size());
513 auto prev = insert_impl(mem, n + 2 * fence_size());
514
515 last_dealloc_ = static_cast<char*>(mem);
516 last_dealloc_prev_ = prev;
517 }
518 }
519
520 std::size_t ordered_free_memory_list::alignment() const noexcept
521 {
522 return alignment_for(node_size_);
523 }
524
525 std::size_t ordered_free_memory_list::fence_size() const noexcept
526 {
527 // node size is fence size
528 return debug_fence_size ? node_size_ : 0u;
529 }
530
531 char* ordered_free_memory_list::insert_impl(void* mem, std::size_t size) noexcept
532 {
533 auto actual_size = node_size_ + 2 * fence_size();
534 auto no_nodes = size / actual_size;
535 FOONATHAN_MEMORY_ASSERT(no_nodes > 0);
536
537 auto p =
538 find_pos(allocator_info(FOONATHAN_MEMORY_LOG_PREFIX "::detail::ordered_free_memory_list",
539 this),
540 static_cast<char*>(mem), begin_node(), end_node(), last_dealloc_,
541 last_dealloc_prev_);
542
543 xor_link_block(mem, actual_size, no_nodes, p.prev, p.next);
544 capacity_ += no_nodes;
545
546 if (p.prev == last_dealloc_prev_)
547 {
548 last_dealloc_ = static_cast<char*>(mem);
549 }
550
551 return p.prev;
552 }
553
554 char* ordered_free_memory_list::begin_node() noexcept
555 {
556 void* mem = &begin_proxy_;
557 return static_cast<char*>(mem);
558 }
559
560 char* ordered_free_memory_list::end_node() noexcept
561 {
562 void* mem = &end_proxy_;
563 return static_cast<char*>(mem);
564 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/free_list_array.hpp"
5
6 #include "detail/assert.hpp"
7 #include "detail/ilog2.hpp"
8
9 using namespace foonathan::memory;
10 using namespace detail;
11
12 std::size_t log2_access_policy::index_from_size(std::size_t size) noexcept
13 {
14 FOONATHAN_MEMORY_ASSERT_MSG(size, "size must not be zero");
15 return ilog2_ceil(size);
16 }
17
18 std::size_t log2_access_policy::size_from_index(std::size_t index) noexcept
19 {
20 return std::size_t(1) << index;
21 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_SRC_DETAIL_FREE_LIST_UTILS_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_SRC_DETAIL_FREE_LIST_UTILS_HPP_INCLUDED
6
7 #include <cstdint>
8
9 #include "config.hpp"
10 #include "detail/align.hpp"
11 #include "detail/assert.hpp"
12
13 #if FOONATHAN_HOSTED_IMPLEMENTATION
14 #include <cstring>
15 #include <functional>
16 #endif
17
18 namespace foonathan { namespace memory
19 {
20 namespace detail
21 {
22 //=== storage ===///
23 // reads stored integer value
24 inline std::uintptr_t get_int(void *address) noexcept
25 {
26 FOONATHAN_MEMORY_ASSERT(address);
27 std::uintptr_t res;
28 #if FOONATHAN_HOSTED_IMPLEMENTATION
29 std::memcpy(&res, address, sizeof(std::uintptr_t));
30 #else
31 auto mem = static_cast<char*>(static_cast<void*>(&res));
32 for (auto i = 0u; i != sizeof(std::uintptr_t); ++i)
33 mem[i] = static_cast<char*>(address)[i];
34 #endif
35 return res;
36 }
37
38 // sets stored integer value
39 inline void set_int(void *address, std::uintptr_t i) noexcept
40 {
41 FOONATHAN_MEMORY_ASSERT(address);
42 #if FOONATHAN_HOSTED_IMPLEMENTATION
43 std::memcpy(address, &i, sizeof(std::uintptr_t));
44 #else
45 auto mem = static_cast<char*>(static_cast<void*>(&i));
46 for (auto idx = 0u; idx != sizeof(std::uintptr_t); ++idx) // idx: don't shadow the parameter i
47 static_cast<char*>(address)[idx] = mem[idx];
48 #endif
49 }
50
51 // pointer to integer
52 inline std::uintptr_t to_int(char *ptr) noexcept
53 {
54 return reinterpret_cast<std::uintptr_t>(ptr);
55 }
56
57 // integer to pointer
58 inline char *from_int(std::uintptr_t i) noexcept
59 {
60 return reinterpret_cast<char *>(i);
61 }
62
63 //=== intrusive linked list ===//
64 // reads a stored pointer value
65 inline char *list_get_next(void *address) noexcept
66 {
67 return from_int(get_int(address));
68 }
69
70 // stores a pointer value
71 inline void list_set_next(void *address, char *ptr) noexcept
72 {
73 set_int(address, to_int(ptr));
74 }
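// Sketch: with these two helpers a free node is pushed onto the intrusive
// list by storing the old head inside the node's own bytes (this is what
// free_memory_list::deallocate() does):
//
//   list_set_next(node, first_); // node's memory now holds the old head
//   first_ = node;               // node becomes the new head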
75
76 //=== intrusive xor linked list ===//
77 // returns the other pointer given one pointer
78 inline char *xor_list_get_other(void *address, char *prev_or_next) noexcept
79 {
80 return from_int(get_int(address) ^ to_int(prev_or_next));
81 }
82
83 // sets the next and previous pointer (order actually does not matter)
84 inline void xor_list_set(void *address, char *prev, char *next) noexcept
85 {
86 set_int(address, to_int(prev) ^ to_int(next));
87 }
88
89 // changes other pointer given one pointer
90 inline void xor_list_change(void *address, char *old_ptr, char *new_ptr) noexcept
91 {
92 FOONATHAN_MEMORY_ASSERT(address);
93 auto other = xor_list_get_other(address, old_ptr);
94 xor_list_set(address, other, new_ptr);
95 }
96
97 // advances a pointer pair forward/backward
98 inline void xor_list_iter_next(char *&cur, char *&prev) noexcept
99 {
100 auto next = xor_list_get_other(cur, prev);
101 prev = cur;
102 cur = next;
103 }
104
105 // links new node between prev and next
106 inline void xor_list_insert(char *new_node, char *prev, char *next) noexcept
107 {
108 xor_list_set(new_node, prev, next);
109 xor_list_change(prev, next, new_node); // change prev's next to new_node
110 xor_list_change(next, prev, new_node); // change next's prev to new_node
111 }
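// Traversal sketch: each node stores prev ^ next, so walking needs the
// previous node as well; starting at a begin proxy (whose prev is nullptr):
//
//   char* prev = nullptr;
//   char* cur  = begin_node;
//   while (cur != end_node)
//       xor_list_iter_next(cur, prev); // cur advances, prev follows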
112
113 //=== sorted list utils ===//
114 // if std::less/std::greater not available compare integer representation and hope it works
115 inline bool less(void *a, void *b) noexcept
116 {
117 #if FOONATHAN_HOSTED_IMPLEMENTATION
118 return std::less<void*>()(a, b);
119 #else
120 return to_int(static_cast<char*>(a)) < to_int(static_cast<char*>(b));
121 #endif
122 }
123
124 inline bool less_equal(void *a, void *b) noexcept
125 {
126 return a == b || less(a, b);
127 }
128
129 inline bool greater(void *a, void *b) noexcept
130 {
131 #if FOONATHAN_HOSTED_IMPLEMENTATION
132 return std::greater<void*>()(a, b);
133 #else
134 return to_int(static_cast<char*>(a)) > to_int(static_cast<char*>(b));
135 #endif
136 }
137
138 inline bool greater_equal(void *a, void *b) noexcept
139 {
140 return a == b || greater(a, b);
141 }
142 } // namespace detail
143 }} // namespace foonathan::memory
144
145 #endif // FOONATHAN_MEMORY_SRC_DETAIL_FREE_LIST_UTILS_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/small_free_list.hpp"
5
6 #include <limits>
7 #include <new>
8
9 #include "detail/align.hpp"
10 #include "detail/debug_helpers.hpp"
11 #include "detail/assert.hpp"
12 #include "error.hpp"
13
14 #include "free_list_utils.hpp"
15
16 using namespace foonathan::memory;
17 using namespace detail;
18
19 struct foonathan::memory::detail::chunk : chunk_base
20 {
21 static const std::size_t memory_offset;
22 static const std::size_t max_nodes;
23
24 // gives it the size of the memory block it is created in and the size of a node
25 chunk(std::size_t total_memory, std::size_t node_size) noexcept
26 : chunk_base(static_cast<unsigned char>((total_memory - memory_offset) / node_size))
27 {
28 FOONATHAN_MEMORY_ASSERT((total_memory - memory_offset) / node_size <= max_nodes);
29 FOONATHAN_MEMORY_ASSERT(capacity > 0);
30 auto p = list_memory();
31 for (unsigned char i = 0u; i != no_nodes; p += node_size)
32 *p = ++i;
33 }
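// After construction the free list is encoded as indices in the node
// memory itself: node 0 stores 1, node 1 stores 2, ..., and the last
// node stores no_nodes, which acts as the end marker used by
// contains() and allocate().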
34
35 // returns memory of the free list
36 unsigned char* list_memory() noexcept
37 {
38 auto mem = static_cast<void*>(this);
39 return static_cast<unsigned char*>(mem) + memory_offset;
40 }
41
42 // returns the nth node
43 unsigned char* node_memory(unsigned char i, std::size_t node_size) noexcept
44 {
45 FOONATHAN_MEMORY_ASSERT(i < no_nodes);
46 return list_memory() + i * node_size;
47 }
48
49 // checks whether a node came from this chunk
50 bool from(unsigned char* node, std::size_t node_size) noexcept
51 {
52 auto begin = list_memory();
53 auto end = list_memory() + no_nodes * node_size;
54 return (begin <= node) & (node < end); // non-short-circuit & avoids a branch
55 }
56
57 // checks whether a node is already in this chunk
58 bool contains(unsigned char* node, std::size_t node_size) noexcept
59 {
60 auto cur_index = first_free;
61 while (cur_index != no_nodes)
62 {
63 auto cur_mem = node_memory(cur_index, node_size);
64 if (cur_mem == node)
65 return true;
66 cur_index = *cur_mem;
67 }
68 return false;
69 }
70
71 // allocates a single node
72 // chunk must not be empty
73 unsigned char* allocate(std::size_t node_size) noexcept
74 {
75 --capacity;
76
77 auto node = node_memory(first_free, node_size);
78 first_free = *node;
79 return node;
80 }
81
82 // deallocates a single node given its address and index
83 // it must be from this chunk
84 void deallocate(unsigned char* node, unsigned char node_index) noexcept
85 {
86 ++capacity;
87
88 *node = first_free;
89 first_free = node_index;
90 }
91 };
92
93 const std::size_t chunk::memory_offset =
94 sizeof(chunk) % detail::max_alignment == 0 ?
95 sizeof(chunk) :
96 (sizeof(chunk) / detail::max_alignment + 1) * detail::max_alignment;
97 const std::size_t chunk::max_nodes = std::numeric_limits<unsigned char>::max();
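// e.g. assuming sizeof(chunk) == 24 and detail::max_alignment == 16 (typical
// on 64-bit targets), memory_offset == 32: the node memory placed right after
// the chunk header then stays aligned for max_alignment.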
98
99 namespace
100 {
101 // converts a chunk_base to a chunk (if it is one)
102 chunk* make_chunk(chunk_base* c) noexcept
103 {
104 return static_cast<chunk*>(c);
105 }
106
107 // same as above but also requires a certain size
108 chunk* make_chunk(chunk_base* c, std::size_t size_needed) noexcept
109 {
110 FOONATHAN_MEMORY_ASSERT(size_needed <= std::numeric_limits<unsigned char>::max());
111 return c->capacity >= size_needed ? make_chunk(c) : nullptr;
112 }
113
114 // checks if memory was from a chunk, assumes chunk isn't proxy
115 chunk* from_chunk(chunk_base* c, unsigned char* node, std::size_t node_size) noexcept
116 {
117 auto res = make_chunk(c);
118 return res->from(node, node_size) ? res : nullptr;
119 }
120
121 // inserts already interconnected chunks into the list
122 // list will be kept ordered
123 void insert_chunks(chunk_base* list, chunk_base* begin, chunk_base* end) noexcept
124 {
125 FOONATHAN_MEMORY_ASSERT(begin && end);
126
127 if (list->next == list) // empty
128 {
129 begin->prev = list;
130 end->next = list->next;
131 list->next = begin;
132 list->prev = end;
133 }
134 else if (less(list->prev, begin)) // insert at end
135 {
136 list->prev->next = begin;
137 begin->prev = list->prev;
138 end->next = list;
139 list->prev = end;
140 }
141 else
142 {
143 auto prev = list;
144 auto cur = list->next;
145 while (less(cur, begin))
146 {
147 prev = cur;
148 cur = cur->next;
149 }
150 FOONATHAN_MEMORY_ASSERT(greater(cur, end));
151 FOONATHAN_MEMORY_ASSERT(prev == list || less(prev, begin));
152 prev->next = begin;
153 begin->prev = prev;
154 end->next = cur;
155 cur->prev = end;
156 }
157 }
158 } // namespace
159
160 constexpr std::size_t small_free_memory_list::min_element_size;
161 constexpr std::size_t small_free_memory_list::min_element_alignment;
162
163 small_free_memory_list::small_free_memory_list(std::size_t node_size) noexcept
164 : node_size_(node_size),
165 capacity_(0u),
166 alloc_chunk_(&base_),
167 dealloc_chunk_(&base_)
168 {
169 }
170
171 small_free_memory_list::small_free_memory_list(std::size_t node_size, void* mem,
172 std::size_t size) noexcept
173 : small_free_memory_list(node_size)
174 {
175 insert(mem, size);
176 }
177
178 small_free_memory_list::small_free_memory_list(small_free_memory_list&& other) noexcept
179 : node_size_(other.node_size_),
180 capacity_(other.capacity_),
181 // reset markers for simplicity
182 alloc_chunk_(&base_),
183 dealloc_chunk_(&base_)
184 {
185 if (!other.empty())
186 {
187 base_.next = other.base_.next;
188 base_.prev = other.base_.prev;
189 other.base_.next->prev = &base_;
190 other.base_.prev->next = &base_;
191
192 other.base_.next = &other.base_;
193 other.base_.prev = &other.base_;
194 other.capacity_ = 0u;
195 }
196 else
197 {
198 base_.next = &base_;
199 base_.prev = &base_;
200 }
201 }
202
203 void foonathan::memory::detail::swap(small_free_memory_list& a,
204 small_free_memory_list& b) noexcept
205 {
206 auto b_next = b.base_.next;
207 auto b_prev = b.base_.prev;
208
209 if (!a.empty())
210 {
211 b.base_.next = a.base_.next;
212 b.base_.prev = a.base_.prev;
213 b.base_.next->prev = &b.base_;
214 b.base_.prev->next = &b.base_;
215 }
216 else
217 {
218 b.base_.next = &b.base_;
219 b.base_.prev = &b.base_;
220 }
221
222 if (!b.empty())
223 {
224 a.base_.next = b_next;
225 a.base_.prev = b_prev;
226 a.base_.next->prev = &a.base_;
227 a.base_.prev->next = &a.base_;
228 }
229 else
230 {
231 a.base_.next = &a.base_;
232 a.base_.prev = &a.base_;
233 }
234
235 detail::adl_swap(a.node_size_, b.node_size_);
236 detail::adl_swap(a.capacity_, b.capacity_);
237
238 // reset markers for simplicity
239 a.alloc_chunk_ = a.dealloc_chunk_ = &a.base_;
240 b.alloc_chunk_ = b.dealloc_chunk_ = &b.base_;
241 }
242
243 void small_free_memory_list::insert(void* mem, std::size_t size) noexcept
244 {
245 FOONATHAN_MEMORY_ASSERT(mem);
246 FOONATHAN_MEMORY_ASSERT(is_aligned(mem, max_alignment));
247 debug_fill_internal(mem, size, false);
248
249 auto actual_size = node_size_ + 2 * fence_size();
250 auto total_chunk_size = chunk::memory_offset + actual_size * chunk::max_nodes;
251 auto align_buffer = align_offset(total_chunk_size, alignof(chunk));
252
253 auto no_chunks = size / (total_chunk_size + align_buffer);
254 auto remainder = size % (total_chunk_size + align_buffer);
255
256 auto memory = static_cast<char*>(mem);
257 auto construct_chunk = [&](std::size_t total_memory, std::size_t node_size) {
258 FOONATHAN_MEMORY_ASSERT(align_offset(memory, alignof(chunk)) == 0);
259 return ::new (static_cast<void*>(memory)) chunk(total_memory, node_size);
260 };
261
262 auto prev = static_cast<chunk_base*>(nullptr);
263 for (auto i = std::size_t(0); i != no_chunks; ++i)
264 {
265 auto c = construct_chunk(total_chunk_size, actual_size);
266
267 c->prev = prev;
268 if (prev)
269 prev->next = c;
270 prev = c;
271
272 memory += total_chunk_size;
273 memory += align_buffer;
274 }
275
276 auto new_nodes = no_chunks * chunk::max_nodes;
277 if (remainder >= chunk::memory_offset + actual_size) // at least one node
278 {
279 auto c = construct_chunk(remainder, actual_size);
280
281 c->prev = prev;
282 if (prev)
283 prev->next = c;
284 prev = c;
285
286 new_nodes += c->no_nodes;
287 }
288
289 FOONATHAN_MEMORY_ASSERT_MSG(new_nodes > 0, "memory block too small");
290 insert_chunks(&base_, static_cast<chunk_base*>(mem), prev);
291 capacity_ += new_nodes;
292 }
293
294 std::size_t small_free_memory_list::usable_size(std::size_t size) const noexcept
295 {
296 auto actual_size = node_size_ + 2 * fence_size();
297 auto total_chunk_size = chunk::memory_offset + actual_size * chunk::max_nodes;
298 auto no_chunks = size / total_chunk_size;
299 auto remainder = size % total_chunk_size;
300
301 return no_chunks * chunk::max_nodes * actual_size
302 + (remainder > chunk::memory_offset ? remainder - chunk::memory_offset : 0u);
303 }
304
305 void* small_free_memory_list::allocate() noexcept
306 {
307 auto chunk = find_chunk_impl(1);
308 alloc_chunk_ = chunk;
309 FOONATHAN_MEMORY_ASSERT(chunk && chunk->capacity >= 1);
310
311 --capacity_;
312
313 auto mem = chunk->allocate(node_size_ + 2 * fence_size());
314 FOONATHAN_MEMORY_ASSERT(mem);
315 return detail::debug_fill_new(mem, node_size_, fence_size());
316 }
317
318 void small_free_memory_list::deallocate(void* mem) noexcept
319 {
320 auto info =
321 allocator_info(FOONATHAN_MEMORY_LOG_PREFIX "::detail::small_free_memory_list", this);
322
323 auto actual_size = node_size_ + 2 * fence_size();
324 auto node = static_cast<unsigned char*>(detail::debug_fill_free(mem, node_size_, fence_size()));
325
326 auto chunk = find_chunk_impl(node);
327 dealloc_chunk_ = chunk;
328 // memory was never allocated from list
329 detail::debug_check_pointer([&] { return chunk != nullptr; }, info, mem);
330
331 auto offset = node - chunk->list_memory();
332 // memory is not at the right position
333 debug_check_pointer([&] { return offset % actual_size == 0u; }, info, mem);
334 // double-free
335 debug_check_double_dealloc([&] { return !chunk->contains(node, actual_size); }, info, mem);
336
337 auto index = offset / actual_size;
338 FOONATHAN_MEMORY_ASSERT(index < chunk->no_nodes);
339 chunk->deallocate(node, static_cast<unsigned char>(index));
340
341 ++capacity_;
342 }
343
344 std::size_t small_free_memory_list::alignment() const noexcept
345 {
346 return alignment_for(node_size_);
347 }
348
349 std::size_t small_free_memory_list::fence_size() const noexcept
350 {
351 // node size is fence size
352 return debug_fence_size ? node_size_ : 0u;
353 }
354
355 chunk* small_free_memory_list::find_chunk_impl(std::size_t n) noexcept
356 {
357 if (auto c = make_chunk(alloc_chunk_, n))
358 return c;
359 else if ((c = make_chunk(dealloc_chunk_, n)) != nullptr)
360 return c;
361
362 auto cur_forward = alloc_chunk_->next;
363 auto cur_backward = alloc_chunk_->prev;
364
365 do
366 {
367 if (auto c = make_chunk(cur_forward, n))
368 return c;
369 else if ((c = make_chunk(cur_backward, n)) != nullptr)
370 return c;
371
372 cur_forward = cur_forward->next;
373 cur_backward = cur_backward->prev;
374 FOONATHAN_MEMORY_ASSERT(cur_forward != alloc_chunk_);
375 FOONATHAN_MEMORY_ASSERT(cur_backward != alloc_chunk_);
376 } while (true);
377 FOONATHAN_MEMORY_UNREACHABLE("there is memory available somewhere...");
378 return nullptr;
379 }
380
381 chunk* small_free_memory_list::find_chunk_impl(unsigned char* node, chunk_base* first,
382 chunk_base* last) noexcept
383 {
384 auto actual_size = node_size_ + 2 * fence_size();
385
386 do
387 {
388 if (auto c = from_chunk(first, node, actual_size))
389 return c;
390 else if ((c = from_chunk(last, node, actual_size)) != nullptr)
391 return c;
392
393 first = first->next;
394 last = last->prev;
395 } while (!greater(first, last));
396 return nullptr;
397 }
398
399 chunk* small_free_memory_list::find_chunk_impl(unsigned char* node) noexcept
400 {
401 auto actual_size = node_size_ + 2 * fence_size();
402
403 if (auto c = from_chunk(dealloc_chunk_, node, actual_size))
404 return c;
405 else if ((c = from_chunk(alloc_chunk_, node, actual_size)) != nullptr)
406 return c;
407 else if (less(dealloc_chunk_, node))
408 {
409 // node is in (dealloc_chunk_, base_.prev]
410 return find_chunk_impl(node, dealloc_chunk_->next, base_.prev);
411 }
412 else if (greater(dealloc_chunk_, node))
413 {
414 // node is in [base.next, dealloc_chunk_)
415 return find_chunk_impl(node, base_.next, dealloc_chunk_->prev);
416 }
417 FOONATHAN_MEMORY_UNREACHABLE("must be in one half");
418 return nullptr;
419 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "error.hpp"
5
6 #include <atomic>
7
8 #if FOONATHAN_HOSTED_IMPLEMENTATION
9 #include <cstdio>
10 #endif
11
12 using namespace foonathan::memory;
13
14 namespace
15 {
16 void default_out_of_memory_handler(const allocator_info& info, std::size_t amount) noexcept
17 {
18 #if FOONATHAN_HOSTED_IMPLEMENTATION
19 std::fprintf(stderr,
20 "[%s] Allocator %s (at %p) ran out of memory trying to allocate %zu bytes.\n",
21 FOONATHAN_MEMORY_LOG_PREFIX, info.name, info.allocator, amount);
22 #endif
23 }
24
25 std::atomic<out_of_memory::handler> out_of_memory_h(default_out_of_memory_handler);
26 } // namespace
27
28 out_of_memory::handler out_of_memory::set_handler(out_of_memory::handler h)
29 {
30 return out_of_memory_h.exchange(h ? h : default_out_of_memory_handler);
31 }
32
33 out_of_memory::handler out_of_memory::get_handler()
34 {
35 return out_of_memory_h;
36 }
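// a custom handler can be installed to log or abort before the exception is
// thrown; a minimal sketch (illustrative only, my_oom_logger is a hypothetical
// function, the handler signature is the one used above):
//
//   void my_oom_logger(const allocator_info& info, std::size_t amount) noexcept
//   {
//       std::fprintf(stderr, "allocation of %zu bytes by %s failed\n", amount, info.name);
//   }
//
//   out_of_memory::set_handler(&my_oom_logger);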
37
38 out_of_memory::out_of_memory(const allocator_info& info, std::size_t amount)
39 : info_(info), amount_(amount)
40 {
41 out_of_memory_h.load()(info, amount);
42 }
43
44 const char* out_of_memory::what() const noexcept
45 {
46 return "low-level allocator is out of memory";
47 }
48
49 const char* out_of_fixed_memory::what() const noexcept
50 {
51 return "fixed size allocator is out of memory";
52 }
53
54 namespace
55 {
56 void default_bad_alloc_size_handler(const allocator_info& info, std::size_t passed,
57 std::size_t supported) noexcept
58 {
59 #if FOONATHAN_HOSTED_IMPLEMENTATION
60 std::fprintf(stderr,
61 "[%s] Allocator %s (at %p) received invalid size/alignment %zu, "
62 "max supported is %zu.\n",
63 FOONATHAN_MEMORY_LOG_PREFIX, info.name, info.allocator, passed, supported);
64 #endif
65 }
66
67 std::atomic<bad_allocation_size::handler> bad_alloc_size_h(default_bad_alloc_size_handler);
68 } // namespace
69
70 bad_allocation_size::handler bad_allocation_size::set_handler(bad_allocation_size::handler h)
71 {
72 return bad_alloc_size_h.exchange(h ? h : default_bad_alloc_size_handler);
73 }
74
75 bad_allocation_size::handler bad_allocation_size::get_handler()
76 {
77 return bad_alloc_size_h;
78 }
79
80 bad_allocation_size::bad_allocation_size(const allocator_info& info, std::size_t passed,
81 std::size_t supported)
82 : info_(info), passed_(passed), supported_(supported)
83 {
84 bad_alloc_size_h.load()(info_, passed_, supported_);
85 }
86
87 const char* bad_allocation_size::what() const noexcept
88 {
89 return "allocation node size exceeds supported maximum of allocator";
90 }
91
92 const char* bad_node_size::what() const noexcept
93 {
94 return "allocation node size exceeds supported maximum of allocator";
95 }
96
97 const char* bad_array_size::what() const noexcept
98 {
99 return "allocation array size exceeds supported maximum of allocator";
100 }
101
102 const char* bad_alignment::what() const noexcept
103 {
104 return "allocation alignment exceeds supported maximum of allocator";
105 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "heap_allocator.hpp"
5
6 #include "error.hpp"
7
8 using namespace foonathan::memory;
9
10 #ifdef _WIN32
11 #include <malloc.h>
12 #include <windows.h>
13
14 namespace
15 {
16 HANDLE get_process_heap() noexcept
17 {
18 static auto heap = GetProcessHeap();
19 return heap;
20 }
21
22 std::size_t max_size() noexcept
23 {
24 return _HEAP_MAXREQ;
25 }
26 }
27
28 void* foonathan::memory::heap_alloc(std::size_t size) noexcept
29 {
30 return HeapAlloc(get_process_heap(), 0, size);
31 }
32
33 void foonathan::memory::heap_dealloc(void* ptr, std::size_t) noexcept
34 {
35 HeapFree(get_process_heap(), 0, ptr);
36 }
37
38 #elif FOONATHAN_HOSTED_IMPLEMENTATION
39 #include <cstdlib>
40 #include <memory>
41
42 void* foonathan::memory::heap_alloc(std::size_t size) noexcept
43 {
44 return std::malloc(size);
45 }
46
47 void foonathan::memory::heap_dealloc(void* ptr, std::size_t) noexcept
48 {
49 std::free(ptr);
50 }
51
52 namespace
53 {
54 std::size_t max_size() noexcept
55 {
56 return std::allocator_traits<std::allocator<char>>::max_size({});
57 }
58 }
59 #else
60 // no implementation for heap_alloc/heap_dealloc
61
62 namespace
63 {
64 std::size_t max_size() noexcept
65 {
66 return std::size_t(-1);
67 }
68 }
69 #endif
70
71 allocator_info detail::heap_allocator_impl::info() noexcept
72 {
73 return {FOONATHAN_MEMORY_LOG_PREFIX "::heap_allocator", nullptr};
74 }
75
76 std::size_t detail::heap_allocator_impl::max_node_size() noexcept
77 {
78 return max_size();
79 }
80
81 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
82 template class detail::lowlevel_allocator<detail::heap_allocator_impl>;
83 template class foonathan::memory::allocator_traits<heap_allocator>;
84 #endif
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "iteration_allocator.hpp"
5
6 using namespace foonathan::memory;
7
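// usage sketch (illustrative only; assumes the interface declared in
// iteration_allocator.hpp, i.e. a block-size constructor, allocate() and
// next_iteration()):
//
//   iteration_allocator<2> alloc(4096u); // memory stays valid for two iterations
//   for (;;)
//   {
//       void* mem = alloc.allocate(64u, 8u);
//       // ... mem can be used during this and the next iteration ...
//       alloc.next_iteration(); // implicitly frees memory from two iterations ago
//   }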
8 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
9 template class foonathan::memory::iteration_allocator<2>;
10 template class foonathan::memory::allocator_traits<iteration_allocator<2>>;
11 template class foonathan::memory::composable_allocator_traits<iteration_allocator<2>>;
12 #endif
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "config.hpp"
5 #if FOONATHAN_HOSTED_IMPLEMENTATION
6
7 #include "malloc_allocator.hpp"
8
9 #include "error.hpp"
10
11 using namespace foonathan::memory;
12
13 allocator_info detail::malloc_allocator_impl::info() noexcept
14 {
15 return {FOONATHAN_MEMORY_LOG_PREFIX "::malloc_allocator", nullptr};
16 }
17
18 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
19 template class detail::lowlevel_allocator<detail::malloc_allocator_impl>;
20 template class foonathan::memory::allocator_traits<malloc_allocator>;
21 #endif
22
23 #endif // FOONATHAN_HOSTED_IMPLEMENTATION
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "memory_arena.hpp"
5
6 #include <new>
7
8 #include "detail/align.hpp"
9
10 using namespace foonathan::memory;
11 using namespace detail;
12
13 void memory_block_stack::push(allocated_mb block) noexcept
14 {
15 FOONATHAN_MEMORY_ASSERT(is_aligned(block.memory, max_alignment));
16 auto next = ::new (block.memory) node(head_, block.size - implementation_offset());
17 head_ = next;
18 }
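// resulting layout of a pushed block (sketch):
//
//   block.memory                  block.memory + implementation_offset()
//   v                             v
//   +-----------------------------+-----------------------------------+
//   | node (prev, usable_size)    | usable memory (node::usable_size) |
//   +-----------------------------+-----------------------------------+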
19
20 memory_block_stack::allocated_mb memory_block_stack::pop() noexcept
21 {
22 FOONATHAN_MEMORY_ASSERT(head_);
23 auto to_pop = head_;
24 head_ = head_->prev;
25 return {to_pop, to_pop->usable_size + implementation_offset()};
26 }
27
28 void memory_block_stack::steal_top(memory_block_stack& other) noexcept
29 {
30 FOONATHAN_MEMORY_ASSERT(other.head_);
31 auto to_steal = other.head_;
32 other.head_ = other.head_->prev;
33
34 to_steal->prev = head_;
35 head_ = to_steal;
36 }
37
38 bool memory_block_stack::owns(const void* ptr) const noexcept
39 {
40 auto address = static_cast<const char*>(ptr);
41 for (auto cur = head_; cur; cur = cur->prev)
42 {
43 auto mem = static_cast<char*>(static_cast<void*>(cur));
44 if (address >= mem && address < mem + cur->usable_size)
45 return true;
46 }
47 return false;
48 }
49
50 std::size_t memory_block_stack::size() const noexcept
51 {
52 std::size_t res = 0u;
53 for (auto cur = head_; cur; cur = cur->prev)
54 ++res;
55 return res;
56 }
57
58 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
59 template class foonathan::memory::memory_arena<static_block_allocator, true>;
60 template class foonathan::memory::memory_arena<static_block_allocator, false>;
61 template class foonathan::memory::memory_arena<virtual_block_allocator, true>;
62 template class foonathan::memory::memory_arena<virtual_block_allocator, false>;
63
64 template class foonathan::memory::growing_block_allocator<>;
65 template class foonathan::memory::memory_arena<growing_block_allocator<>, true>;
66 template class foonathan::memory::memory_arena<growing_block_allocator<>, false>;
67
68 template class foonathan::memory::fixed_block_allocator<>;
69 template class foonathan::memory::memory_arena<fixed_block_allocator<>, true>;
70 template class foonathan::memory::memory_arena<fixed_block_allocator<>, false>;
71 #endif
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "memory_pool.hpp"
5
6 #include "debugging.hpp"
7
8 using namespace foonathan::memory;
9
10 void detail::memory_pool_leak_handler::operator()(std::ptrdiff_t amount)
11 {
12 get_leak_handler()({FOONATHAN_MEMORY_LOG_PREFIX "::memory_pool", this}, amount);
13 }
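// usage sketch (illustrative only; assumes the constructor and node functions
// declared in memory_pool.hpp):
//
//   memory_pool<node_pool> pool(16u, 4096u); // 16 byte nodes, 4KiB blocks
//   void* node = pool.allocate_node();       // O(1) allocation from the free list
//   pool.deallocate_node(node);              // O(1) deallocation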
14
15 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
16 template class foonathan::memory::memory_pool<node_pool>;
17 template class foonathan::memory::memory_pool<array_pool>;
18 template class foonathan::memory::memory_pool<small_node_pool>;
19
20 template class foonathan::memory::allocator_traits<memory_pool<node_pool>>;
21 template class foonathan::memory::allocator_traits<memory_pool<array_pool>>;
22 template class foonathan::memory::allocator_traits<memory_pool<small_node_pool>>;
23
24 template class foonathan::memory::composable_allocator_traits<memory_pool<node_pool>>;
25 template class foonathan::memory::composable_allocator_traits<memory_pool<array_pool>>;
26 template class foonathan::memory::composable_allocator_traits<memory_pool<small_node_pool>>;
27 #endif
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "memory_pool_collection.hpp"
5
6 #include "debugging.hpp"
7
8 using namespace foonathan::memory;
9
10 void detail::memory_pool_collection_leak_handler::operator()(std::ptrdiff_t amount)
11 {
12 get_leak_handler()({FOONATHAN_MEMORY_LOG_PREFIX "::memory_pool_collection", this}, amount);
13 }
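// usage sketch (illustrative only; assumes the interface declared in
// memory_pool_collection.hpp):
//
//   memory_pool_collection<node_pool, log2_buckets> pools(64u, 4096u);
//   void* small = pools.allocate_node(8u);  // served by the 8 byte bucket
//   void* big   = pools.allocate_node(48u); // rounded up to the 64 byte bucket
//   pools.deallocate_node(big, 48u);
//   pools.deallocate_node(small, 8u);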
14
15 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
16 template class foonathan::memory::memory_pool_collection<node_pool, identity_buckets>;
17 template class foonathan::memory::memory_pool_collection<array_pool, identity_buckets>;
18 template class foonathan::memory::memory_pool_collection<small_node_pool, identity_buckets>;
19
20 template class foonathan::memory::memory_pool_collection<node_pool, log2_buckets>;
21 template class foonathan::memory::memory_pool_collection<array_pool, log2_buckets>;
22 template class foonathan::memory::memory_pool_collection<small_node_pool, log2_buckets>;
23
24 template class foonathan::memory::allocator_traits<memory_pool_collection<node_pool,
25 identity_buckets>>;
26 template class foonathan::memory::allocator_traits<memory_pool_collection<array_pool,
27 identity_buckets>>;
28 template class foonathan::memory::allocator_traits<memory_pool_collection<small_node_pool,
29 identity_buckets>>;
30
31 template class foonathan::memory::allocator_traits<memory_pool_collection<node_pool, log2_buckets>>;
32 template class foonathan::memory::allocator_traits<memory_pool_collection<array_pool,
33 log2_buckets>>;
34 template class foonathan::memory::allocator_traits<memory_pool_collection<small_node_pool,
35 log2_buckets>>;
36
37 template class foonathan::memory::
38 composable_allocator_traits<memory_pool_collection<node_pool, identity_buckets>>;
39 template class foonathan::memory::
40 composable_allocator_traits<memory_pool_collection<array_pool, identity_buckets>>;
41 template class foonathan::memory::
42 composable_allocator_traits<memory_pool_collection<small_node_pool, identity_buckets>>;
43
44 template class foonathan::memory::composable_allocator_traits<memory_pool_collection<node_pool,
45 log2_buckets>>;
46 template class foonathan::memory::composable_allocator_traits<memory_pool_collection<array_pool,
47 log2_buckets>>;
48 template class foonathan::memory::
49 composable_allocator_traits<memory_pool_collection<small_node_pool, log2_buckets>>;
50 #endif
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "memory_stack.hpp"
5
6 #include "debugging.hpp"
7
8 using namespace foonathan::memory;
9
10 void detail::memory_stack_leak_handler::operator()(std::ptrdiff_t amount)
11 {
12 get_leak_handler()({FOONATHAN_MEMORY_LOG_PREFIX "::memory_stack", this}, amount);
13 }
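// usage sketch (illustrative only; assumes the interface declared in
// memory_stack.hpp):
//
//   memory_stack<> stack(4096u);         // initial block size
//   auto marker = stack.top();           // remember the current top
//   void* mem = stack.allocate(64u, 8u);
//   stack.unwind(marker);                // frees everything allocated after the marker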
14
15 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
16 template class foonathan::memory::memory_stack<>;
17 template class foonathan::memory::memory_stack_raii_unwind<memory_stack<>>;
18 template class foonathan::memory::allocator_traits<memory_stack<>>;
19 template class foonathan::memory::composable_allocator_traits<memory_stack<>>;
20 #endif
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "new_allocator.hpp"
5
6 #if FOONATHAN_HOSTED_IMPLEMENTATION
7 #include <memory>
8 #endif
9
10 #include <new>
11
12 #include "error.hpp"
13
14 using namespace foonathan::memory;
15
16 allocator_info detail::new_allocator_impl::info() noexcept
17 {
18 return {FOONATHAN_MEMORY_LOG_PREFIX "::new_allocator", nullptr};
19 }
20
21 void* detail::new_allocator_impl::allocate(std::size_t size, std::size_t) noexcept
22 {
23 void* memory = nullptr;
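// mirror the retry protocol of a throwing ::operator new: keep invoking the
// installed new_handler (which may free up memory) and give up only once
// allocation still fails with no handler installed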
24 while (true)
25 {
26 memory = ::operator new(size, std::nothrow);
27 if (memory)
28 break;
29
30 auto handler = std::get_new_handler();
31 if (handler)
32 {
33 #if FOONATHAN_HAS_EXCEPTION_SUPPORT
34 try
35 {
36 handler();
37 }
38 catch (...)
39 {
40 return nullptr;
41 }
42 #else
43 handler();
44 #endif
45 }
46 else
47 {
48 return nullptr;
49 }
50 }
51 return memory;
52 }
53
54 void detail::new_allocator_impl::deallocate(void* ptr, std::size_t, std::size_t) noexcept
55 {
56 ::operator delete(ptr);
57 }
58
59 std::size_t detail::new_allocator_impl::max_node_size() noexcept
60 {
61 #if FOONATHAN_HOSTED_IMPLEMENTATION
62 return std::allocator_traits<std::allocator<char>>::max_size({});
63 #else
64 return std::size_t(-1);
65 #endif
66 }
67
68 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
69 template class detail::lowlevel_allocator<detail::new_allocator_impl>;
70 template class foonathan::memory::allocator_traits<new_allocator>;
71 #endif
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "static_allocator.hpp"
5
6 #include "detail/debug_helpers.hpp"
7 #include "error.hpp"
8 #include "memory_arena.hpp"
9
10 using namespace foonathan::memory;
11
12 void* static_allocator::allocate_node(std::size_t size, std::size_t alignment)
13 {
14 auto mem = stack_.allocate(end_, size, alignment);
15 if (!mem)
16 FOONATHAN_THROW(out_of_fixed_memory(info(), size));
17 return mem;
18 }
19
20 allocator_info static_allocator::info() const noexcept
21 {
22 return {FOONATHAN_MEMORY_LOG_PREFIX "::static_allocator", this};
23 }
24
25 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
26 template class foonathan::memory::allocator_traits<static_allocator>;
27 #endif
28
29 memory_block static_block_allocator::allocate_block()
30 {
31 if (cur_ + block_size_ > end_)
32 FOONATHAN_THROW(out_of_fixed_memory(info(), block_size_));
33 auto mem = cur_;
34 cur_ += block_size_;
35 return {mem, block_size_};
36 }
37
38 void static_block_allocator::deallocate_block(memory_block block) noexcept
39 {
40 detail::
41 debug_check_pointer([&] { return static_cast<char*>(block.memory) + block.size == cur_; },
42 info(), block.memory);
43 cur_ -= block_size_;
44 }
45
46 allocator_info static_block_allocator::info() const noexcept
47 {
48 return {FOONATHAN_MEMORY_LOG_PREFIX "::static_block_allocator", this};
49 }
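// usage sketch (illustrative only; assumes the constructors declared in
// static_allocator.hpp and memory_arena.hpp):
//
//   static_allocator_storage<4096u> storage;
//   memory_arena<static_block_allocator> arena(1024u, storage); // four 1KiB blocks
//   auto block = arena.allocate_block(); // throws out_of_fixed_memory once exhausted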
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "temporary_allocator.hpp"
5
6 #include <new>
7 #include <type_traits>
8
9 #include "detail/assert.hpp"
10 #include "default_allocator.hpp"
11 #include "error.hpp"
12
13 using namespace foonathan::memory;
14
15 namespace
16 {
17 void default_growth_tracker(std::size_t) noexcept {}
18
19 using temporary_impl_allocator = default_allocator;
20 using temporary_impl_allocator_traits = allocator_traits<temporary_impl_allocator>;
21 } // namespace
22
23 detail::temporary_block_allocator::temporary_block_allocator(std::size_t block_size) noexcept
24 : tracker_(default_growth_tracker), block_size_(block_size)
25 {
26 }
27
28 detail::temporary_block_allocator::growth_tracker detail::temporary_block_allocator::
29 set_growth_tracker(growth_tracker t) noexcept
30 {
31 auto old = tracker_;
32 tracker_ = t;
33 return old;
34 }
35
36 detail::temporary_block_allocator::growth_tracker detail::temporary_block_allocator::
37 get_growth_tracker() noexcept
38 {
39 return tracker_;
40 }
41
42 memory_block detail::temporary_block_allocator::allocate_block()
43 {
44 auto alloc = temporary_impl_allocator();
45 auto memory = temporary_impl_allocator_traits::allocate_array(alloc, block_size_, 1,
46 detail::max_alignment);
47 auto block = memory_block(memory, block_size_);
48 block_size_ = std::size_t(block_size_
49 * growing_block_allocator<temporary_impl_allocator>::growth_factor());
50 return block;
51 }
52
53 void detail::temporary_block_allocator::deallocate_block(memory_block block)
54 {
55 auto alloc = temporary_impl_allocator();
56 temporary_impl_allocator_traits::deallocate_array(alloc, block.memory, block.size, 1,
57 detail::max_alignment);
58 }
59
60 #if FOONATHAN_MEMORY_TEMPORARY_STACK_MODE >= 2
61 // lifetime management through the nifty counter and the list
62 // note: a simple `thread_local` variable for the temporary stack would suffice,
63 // but it could lead to issues with destruction order,
64 // and, more importantly, some supported platforms can't handle non-trivial thread locals,
65 // hence the stacks are allocated dynamically and stored in a container
66 // on program exit the container is iterated and all stacks are properly destroyed
67 // if a thread exit can be detected, the dynamic memory of the stack is released immediately,
68 // but the stack object itself is not destroyed until program exit
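//
// usage sketch of the public interface backed by this machinery (illustrative
// only; the initial size of 4096u is an arbitrary example value):
//
//   void thread_fn()
//   {
//       // optional: create the thread-local stack eagerly, clean it up on scope exit
//       temporary_stack_initializer init(4096u);
//
//       temporary_allocator alloc; // uses get_temporary_stack()
//       void* mem = alloc.allocate(64u, 8u);
//       // ... mem is unwound automatically when alloc goes out of scope
//   }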
69
70 #if !defined(__MINGW64__)
71
72 // only use the thread exit detector if we have thread_local and are not running on MinGW, due to a bug there
73 // see: https://sourceforge.net/p/mingw-w64/bugs/527/
74 #define FOONATHAN_MEMORY_THREAD_EXIT_DETECTOR 1
75
76 #else
77 #define FOONATHAN_MEMORY_THREAD_EXIT_DETECTOR 0
78
79 #if defined(_MSC_VER)
80 #pragma message( \
81 "thread_local doesn't support destructors, need to use the temporary_stack_initializer to ensure proper cleanup of the temporary memory")
82 #else
83 #warning \
84 "thread_local doesn't support destructors, need to use the temporary_stack_initializer to ensure proper cleanup of the temporary memory"
85 #endif
86
87 #endif
88
89 static class detail::temporary_stack_list
90 {
91 public:
92 std::atomic<temporary_stack_list_node*> first;
93
94 temporary_stack* create_new(std::size_t size)
95 {
96 auto storage =
97 default_allocator().allocate_node(sizeof(temporary_stack), alignof(temporary_stack));
98 return ::new (storage) temporary_stack(0, size);
99 }
100
101 temporary_stack* find_unused()
102 {
103 for (auto ptr = first.load(); ptr; ptr = ptr->next_)
104 {
105 auto value = false;
106 if (ptr->in_use_.compare_exchange_strong(value, true))
107 return static_cast<temporary_stack*>(ptr);
108 }
109
110 return nullptr;
111 }
112
113 temporary_stack* create(std::size_t size)
114 {
115 if (auto ptr = find_unused())
116 {
117 FOONATHAN_MEMORY_ASSERT(ptr->in_use_);
118 ptr->stack_ = detail::temporary_stack_impl(size);
119 return ptr;
120 }
121 return create_new(size);
122 }
123
124 void clear(temporary_stack& stack)
125 {
126 // stack should be empty now, so shrink_to_fit() clears all memory
127 stack.stack_.shrink_to_fit();
128 stack.in_use_ = false; // mark as free
129 }
130
131 void destroy()
132 {
133 for (auto ptr = first.exchange(nullptr); ptr;)
134 {
135 auto stack = static_cast<temporary_stack*>(ptr);
136 auto next = ptr->next_;
137
138 stack->~temporary_stack();
139 default_allocator().deallocate_node(stack, sizeof(temporary_stack),
140 alignof(temporary_stack));
141
142 ptr = next;
143 }
144
145 FOONATHAN_MEMORY_ASSERT_MSG(!first.load(),
146 "destroy() called while other threads are still running");
147 }
148 } temporary_stack_list_obj;
149
150 namespace
151 {
152 thread_local std::size_t nifty_counter;
153 thread_local temporary_stack* temp_stack = nullptr;
154
155 #if FOONATHAN_MEMORY_THREAD_EXIT_DETECTOR
156 // not used on MinGW due to a bug, see above
157 thread_local struct thread_exit_detector_t
158 {
159 ~thread_exit_detector_t() noexcept
160 {
161 if (temp_stack)
162 // clear automatically on thread exit, as the initializer's destructor does
163 // note: if another thread_local variable's destructor is called after this one
164 // and that destructor uses the temporary allocator
165 // the stack needs to grow again
166 // but who does temporary allocation in a destructor?!
167 temporary_stack_list_obj.clear(*temp_stack);
168 }
169 } thread_exit_detector;
170 #endif
171 } // namespace
172
173 detail::temporary_stack_list_node::temporary_stack_list_node(int) noexcept : in_use_(true)
174 {
175 next_ = temporary_stack_list_obj.first.load();
176 while (!temporary_stack_list_obj.first.compare_exchange_weak(next_, this))
177 ;
178 #if FOONATHAN_MEMORY_THREAD_EXIT_DETECTOR
179 (void)&thread_exit_detector; // ODR-use it, so it will be created
180 #endif
181 }
182
183 detail::temporary_allocator_dtor_t::temporary_allocator_dtor_t() noexcept
184 {
185 ++nifty_counter;
186 }
187
188 detail::temporary_allocator_dtor_t::~temporary_allocator_dtor_t() noexcept
189 {
190 if (--nifty_counter == 0u && temp_stack)
191 temporary_stack_list_obj.destroy();
192 }
193
194 temporary_stack_initializer::temporary_stack_initializer(std::size_t initial_size)
195 {
196 if (!temp_stack)
197 temp_stack = temporary_stack_list_obj.create(initial_size);
198 }
199
200 temporary_stack_initializer::~temporary_stack_initializer() noexcept
201 {
202 // don't destroy, nifty counter does that
203 // but can get rid of all the memory
204 if (temp_stack)
205 temporary_stack_list_obj.clear(*temp_stack);
206 }
207
208 temporary_stack& foonathan::memory::get_temporary_stack(std::size_t initial_size)
209 {
210 if (!temp_stack)
211 temp_stack = temporary_stack_list_obj.create(initial_size);
212 return *temp_stack;
213 }
214
215 #elif FOONATHAN_MEMORY_TEMPORARY_STACK_MODE == 1
216
217 namespace
218 {
219 FOONATHAN_THREAD_LOCAL alignas(
220 temporary_stack) char temporary_stack_storage[sizeof(temporary_stack)];
221 FOONATHAN_THREAD_LOCAL bool is_created = false;
222
223 temporary_stack& get() noexcept
224 {
225 FOONATHAN_MEMORY_ASSERT(is_created);
226 return *static_cast<temporary_stack*>(static_cast<void*>(&temporary_stack_storage));
227 }
228
229 void create(std::size_t initial_size)
230 {
231 if (!is_created)
232 {
233 ::new (static_cast<void*>(&temporary_stack_storage)) temporary_stack(initial_size);
234 is_created = true;
235 }
236 }
237 } // namespace
238
239 // explicit lifetime management
240 temporary_stack_initializer::temporary_stack_initializer(std::size_t initial_size)
241 {
242 create(initial_size);
243 }
244
245 temporary_stack_initializer::~temporary_stack_initializer()
246 {
247 if (is_created)
248 get().~temporary_stack();
249 }
250
251 temporary_stack& foonathan::memory::get_temporary_stack(std::size_t initial_size)
252 {
253 create(initial_size);
254 return get();
255 }
256
257 #else
258
259 // no lifetime management
260
261 temporary_stack_initializer::temporary_stack_initializer(std::size_t initial_size)
262 {
263 if (initial_size != 0u)
264 FOONATHAN_MEMORY_WARNING("temporary_stack_initializer() has no effect if "
265 "FOONATHAN_MEMORY_TEMPORARY_STACK == 0 (pass an initial size of 0 "
266 "to disable this message)");
267 }
268
269 temporary_stack_initializer::~temporary_stack_initializer() {}
270
271 temporary_stack& foonathan::memory::get_temporary_stack(std::size_t)
272 {
273 FOONATHAN_MEMORY_UNREACHABLE("get_temporary_stack() called but stack is disabled by "
274 "FOONATHAN_MEMORY_TEMPORARY_STACK == 0");
275 std::abort();
276 }
277
278 #endif
279
280 const temporary_stack_initializer::defer_create_t temporary_stack_initializer::defer_create;
281
282 temporary_allocator::temporary_allocator() : temporary_allocator(get_temporary_stack()) {}
283
284 temporary_allocator::temporary_allocator(temporary_stack& stack)
285 : unwind_(stack), prev_(stack.top_), shrink_to_fit_(false)
286 {
287 FOONATHAN_MEMORY_ASSERT(!prev_ || prev_->is_active());
288 stack.top_ = this;
289 }
290
291 temporary_allocator::~temporary_allocator() noexcept
292 {
293 if (is_active())
294 {
295 auto& stack = unwind_.get_stack();
296 stack.top_ = prev_;
297 unwind_.unwind(); // manually call it now...
298 if (shrink_to_fit_)
299 // to call shrink_to_fit() afterwards
300 stack.stack_.shrink_to_fit();
301 }
302 }
303
304 void* temporary_allocator::allocate(std::size_t size, std::size_t alignment)
305 {
306 FOONATHAN_MEMORY_ASSERT_MSG(is_active(), "object isn't the active allocator");
307 return unwind_.get_stack().stack_.allocate(size, alignment);
308 }
309
310 void temporary_allocator::shrink_to_fit() noexcept
311 {
312 shrink_to_fit_ = true;
313 }
314
315 bool temporary_allocator::is_active() const noexcept
316 {
317 FOONATHAN_MEMORY_ASSERT(unwind_.will_unwind());
318 auto res = unwind_.get_stack().top_ == this;
319 // check that prev is actually before this
320 FOONATHAN_MEMORY_ASSERT(!res || !prev_ || prev_->unwind_.get_marker() <= unwind_.get_marker());
321 return res;
322 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "virtual_memory.hpp"
5
6 #include "detail/debug_helpers.hpp"
7 #include "error.hpp"
8 #include "memory_arena.hpp"
9
10 using namespace foonathan::memory;
11
12 void detail::virtual_memory_allocator_leak_handler::operator()(std::ptrdiff_t amount)
13 {
14 detail::debug_handle_memory_leak({FOONATHAN_MEMORY_LOG_PREFIX "::virtual_memory_allocator",
15 nullptr},
16 amount);
17 }
18
19 #if defined(_WIN32)
20 #include <windows.h>
21
22 namespace
23 {
24 std::size_t get_page_size() noexcept
25 {
26 static_assert(sizeof(std::size_t) >= sizeof(DWORD), "possible loss of data");
27
28 SYSTEM_INFO info;
29 GetSystemInfo(&info);
30 return std::size_t(info.dwPageSize);
31 }
32 } // namespace
33
34 const std::size_t foonathan::memory::virtual_memory_page_size = get_page_size();
35
36 void* foonathan::memory::virtual_memory_reserve(std::size_t no_pages) noexcept
37 {
38 auto pages =
39 #if (_MSC_VER <= 1900)
40 VirtualAlloc(nullptr, no_pages * virtual_memory_page_size, MEM_RESERVE, PAGE_READWRITE);
41 #else
42 VirtualAllocFromApp(nullptr, no_pages * virtual_memory_page_size, MEM_RESERVE,
43 PAGE_READWRITE);
44 #endif
45 return pages;
46 }
47
48 void foonathan::memory::virtual_memory_release(void* pages, std::size_t) noexcept
49 {
50 auto result = VirtualFree(pages, 0u, MEM_RELEASE);
51 FOONATHAN_MEMORY_ASSERT_MSG(result, "cannot release pages");
52 }
53
54 void* foonathan::memory::virtual_memory_commit(void* memory, std::size_t no_pages) noexcept
55 {
56 auto region =
57 #if (_MSC_VER <= 1900)
58 VirtualAlloc(memory, no_pages * virtual_memory_page_size, MEM_COMMIT, PAGE_READWRITE);
59 #else
60 VirtualAllocFromApp(memory, no_pages * virtual_memory_page_size, MEM_COMMIT,
61 PAGE_READWRITE);
62 #endif
63 if (!region)
64 return nullptr;
65 FOONATHAN_MEMORY_ASSERT(region == memory);
66 return region;
67 }
68
69 void foonathan::memory::virtual_memory_decommit(void* memory, std::size_t no_pages) noexcept
70 {
71 auto result = VirtualFree(memory, no_pages * virtual_memory_page_size, MEM_DECOMMIT);
72 FOONATHAN_MEMORY_ASSERT_MSG(result, "cannot decommit memory");
73 }
74 #elif defined(__unix__) || defined(__APPLE__) || defined(__VXWORKS__) \
75 || defined(__QNXNTO__) // POSIX systems
76 #include <sys/mman.h>
77 #include <unistd.h>
78
79 #if defined(PAGESIZE)
80 const std::size_t foonathan::memory::virtual_memory_page_size = PAGESIZE;
81 #elif defined(PAGE_SIZE)
82 const std::size_t foonathan::memory::virtual_memory_page_size = PAGE_SIZE;
83 #else
84 const std::size_t foonathan::memory::virtual_memory_page_size = sysconf(_SC_PAGESIZE);
85 #endif
86
87 #ifndef MAP_ANONYMOUS
88 #define MAP_ANONYMOUS MAP_ANON
89 #endif
90
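// reserving maps the range inaccessible (PROT_NONE); committing later merely
// flips the protection to read/write via mprotect()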
91 void* foonathan::memory::virtual_memory_reserve(std::size_t no_pages) noexcept
92 {
93 auto pages = mmap(nullptr, no_pages * virtual_memory_page_size, PROT_NONE,
94 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
95 return pages == MAP_FAILED ? nullptr : pages;
96 }
97
98 void foonathan::memory::virtual_memory_release(void* pages, std::size_t no_pages) noexcept
99 {
100 auto result = munmap(pages, no_pages * virtual_memory_page_size);
101 FOONATHAN_MEMORY_ASSERT_MSG(result == 0, "cannot release pages");
102 (void)result;
103 }
104
105 void* foonathan::memory::virtual_memory_commit(void* memory, std::size_t no_pages) noexcept
106 {
107 auto size = no_pages * virtual_memory_page_size;
108 auto result = mprotect(memory, size, PROT_WRITE | PROT_READ);
109 if (result != 0u)
110 return nullptr;
111
112 // advise that the memory will be needed
113 #if defined(MADV_WILLNEED)
114 madvise(memory, size, MADV_WILLNEED);
115 #elif defined(POSIX_MADV_WILLNEED)
116 posix_madvise(memory, size, POSIX_MADV_WILLNEED);
117 #endif
118
119 return memory;
120 }
121
122 void foonathan::memory::virtual_memory_decommit(void* memory, std::size_t no_pages) noexcept
123 {
124 auto size = no_pages * virtual_memory_page_size;
125 // advise that the memory won't be needed anymore
126 #if defined(MADV_FREE)
127 madvise(memory, size, MADV_FREE);
128 #elif defined(MADV_DONTNEED)
129 madvise(memory, size, MADV_DONTNEED);
130 #elif defined(POSIX_MADV_DONTNEED)
131 posix_madvise(memory, size, POSIX_MADV_DONTNEED);
132 #endif
133
134 auto result = mprotect(memory, size, PROT_NONE);
135 FOONATHAN_MEMORY_ASSERT_MSG(result == 0, "cannot decommit memory");
136 (void)result;
137 }
138 #else
139 #warning "virtual memory functions not available on your platform, define your own"
140 #endif
141
142 namespace
143 {
144 std::size_t calc_no_pages(std::size_t size) noexcept
145 {
146 auto div = size / virtual_memory_page_size;
147 auto rest = size % virtual_memory_page_size;
148
149 return div + (rest != 0u) + (detail::debug_fence_size ? 2u : 1u);
150 }
151 } // namespace
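// e.g. with 4KiB pages and size == 5000: div == 1, rest == 904, so
// 1 + 1 + 1 == 3 pages are requested without debug fences and
// 1 + 1 + 2 == 4 with them (the fences occupy one page on each side)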
152
153 void* virtual_memory_allocator::allocate_node(std::size_t size, std::size_t)
154 {
155 auto no_pages = calc_no_pages(size);
156 auto pages = virtual_memory_reserve(no_pages);
157 if (!pages || !virtual_memory_commit(pages, no_pages))
158 FOONATHAN_THROW(
159 out_of_memory({FOONATHAN_MEMORY_LOG_PREFIX "::virtual_memory_allocator", nullptr},
160 no_pages * virtual_memory_page_size));
161 on_allocate(size);
162
163 return detail::debug_fill_new(pages, size, virtual_memory_page_size);
164 }
165
166 void virtual_memory_allocator::deallocate_node(void* node, std::size_t size, std::size_t) noexcept
167 {
168 auto pages = detail::debug_fill_free(node, size, virtual_memory_page_size);
169
170 on_deallocate(size);
171
172 auto no_pages = calc_no_pages(size);
173 virtual_memory_decommit(pages, no_pages);
174 virtual_memory_release(pages, no_pages);
175 }
176
177 std::size_t virtual_memory_allocator::max_node_size() const noexcept
178 {
179 return std::size_t(-1);
180 }
181
182 std::size_t virtual_memory_allocator::max_alignment() const noexcept
183 {
184 return virtual_memory_page_size;
185 }
186
187 #if FOONATHAN_MEMORY_EXTERN_TEMPLATE
188 template class foonathan::memory::allocator_traits<virtual_memory_allocator>;
189 #endif
190
191 virtual_block_allocator::virtual_block_allocator(std::size_t block_size, std::size_t no_blocks)
192 : block_size_(block_size)
193 {
194 FOONATHAN_MEMORY_ASSERT(block_size % virtual_memory_page_size == 0u);
195 FOONATHAN_MEMORY_ASSERT(no_blocks > 0);
196 auto total_size = block_size_ * no_blocks;
197 auto no_pages = total_size / virtual_memory_page_size;
198
199 cur_ = static_cast<char*>(virtual_memory_reserve(no_pages));
200 if (!cur_)
201 FOONATHAN_THROW(out_of_memory(info(), total_size));
202 end_ = cur_ + total_size;
203 }
204
205 virtual_block_allocator::~virtual_block_allocator() noexcept
206 {
207 virtual_memory_release(cur_, (end_ - cur_) / virtual_memory_page_size);
208 }
209
210 memory_block virtual_block_allocator::allocate_block()
211 {
212 if (std::size_t(end_ - cur_) < block_size_)
213 FOONATHAN_THROW(out_of_fixed_memory(info(), block_size_));
214 auto mem = virtual_memory_commit(cur_, block_size_ / virtual_memory_page_size);
215 if (!mem)
216 FOONATHAN_THROW(out_of_fixed_memory(info(), block_size_));
217 cur_ += block_size_;
218 return {mem, block_size_};
219 }
220
221 void virtual_block_allocator::deallocate_block(memory_block block) noexcept
222 {
223 detail::
224 debug_check_pointer([&] { return static_cast<char*>(block.memory) == cur_ - block_size_; },
225 info(), block.memory);
226 cur_ -= block_size_;
227 virtual_memory_decommit(cur_, block_size_ / virtual_memory_page_size);
228 }
229
230 allocator_info virtual_block_allocator::info() noexcept
231 {
232 return {FOONATHAN_MEMORY_LOG_PREFIX "::virtual_block_allocator", this};
233 }
0 # Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 # This file is subject to the license terms in the LICENSE file
2 # found in the top-level directory of this distribution.
3
4 # builds the profiling and test executables
5
6 add_executable(foonathan_memory_profiling benchmark.hpp profiling.cpp)
7 target_link_libraries(foonathan_memory_profiling foonathan_memory)
8 target_include_directories(foonathan_memory_profiling PRIVATE
9 ${FOONATHAN_MEMORY_SOURCE_DIR}/include/foonathan/memory)
10
11 if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/catch.hpp)
12 file(DOWNLOAD
13 https://raw.githubusercontent.com/catchorg/Catch2/2e61d38c7c3078e600c331257b5bebfb81aaa685/single_include/catch2/catch.hpp
14 ${CMAKE_CURRENT_BINARY_DIR}/catch.hpp
15 STATUS status
16 LOG log)
17
18 list(GET status 0 status_code)
19 list(GET status 1 status_string)
20
21 if(NOT status_code EQUAL 0)
22 message(FATAL_ERROR "error downloading catch: ${status_string}"
23 "${log}")
24 endif()
25 endif()
26
27 set(tests
28 test_allocator.hpp
29 test.cpp
30 detail/align.cpp
31 detail/debug_helpers.cpp
32 detail/free_list.cpp
33 detail/free_list_array.cpp
34 detail/ilog2.cpp
35 detail/memory_stack.cpp
36 aligned_allocator.cpp
37 allocator_traits.cpp
38 default_allocator.cpp
39 fallback_allocator.cpp
40 iteration_allocator.cpp
41 joint_allocator.cpp
42 memory_arena.cpp
43 memory_pool.cpp
44 memory_pool_collection.cpp
45 memory_resource_adapter.cpp
46 memory_stack.cpp
47 segregator.cpp
48 smart_ptr.cpp)
49
50 add_executable(foonathan_memory_test ${tests})
51 target_link_libraries(foonathan_memory_test foonathan_memory)
52 target_include_directories(foonathan_memory_test PRIVATE
53 ${CMAKE_CURRENT_BINARY_DIR}
54 ${FOONATHAN_MEMORY_SOURCE_DIR}/include/foonathan/memory)
55
56 add_test(NAME test COMMAND foonathan_memory_test)
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "aligned_allocator.hpp"
5
6 #include <catch.hpp>
7
8 #include "detail/align.hpp"
9 #include "allocator_storage.hpp"
10 #include "memory_stack.hpp"
11
12 using namespace foonathan::memory;
13
14 TEST_CASE("aligned_allocator", "[adapter]")
15 {
16 using allocator_t = aligned_allocator<allocator_reference<memory_stack<>>>;
17
18 memory_stack<> stack(1024);
19 stack.allocate(3, 1); // manual misalign
20
21 allocator_t alloc(4u, stack);
22 REQUIRE(alloc.min_alignment() == 4u);
23
24 auto mem1 = alloc.allocate_node(16u, 1u);
25 REQUIRE(detail::align_offset(mem1, 4u) == 0u);
26 auto mem2 = alloc.allocate_node(16u, 8u);
27 REQUIRE(detail::align_offset(mem2, 4u) == 0u);
28 REQUIRE(detail::align_offset(mem2, 8u) == 0u);
29
30 alloc.deallocate_node(mem2, 16u, 8u);
31 alloc.deallocate_node(mem1, 16u, 1u);
32 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "allocator_traits.hpp"
5
6 #include <catch.hpp>
7
8 #include <type_traits>
9
10 #include "heap_allocator.hpp"
11
12 using namespace foonathan::memory;
13
14 static_assert(is_raw_allocator<std::allocator<char>>::value, "");
15 static_assert(allocator_is_raw_allocator<std::allocator<char>>::value, "");
16 static_assert(is_raw_allocator<std::allocator<int>>::value, "");
17 static_assert(allocator_is_raw_allocator<std::allocator<int>>::value, "");
18 static_assert(is_raw_allocator<heap_allocator>::value, "");
19 static_assert(!is_raw_allocator<int>::value, "");
20
21 template <typename T>
22 struct std_allocator_construct
23 {
24 using value_type = T;
25 using pointer = T*;
26
27 void construct(pointer, T)
28 {
29 }
30 };
31
32 static_assert(!allocator_is_raw_allocator<std_allocator_construct<int>>::value, "");
33 static_assert(!is_raw_allocator<std_allocator_construct<int>>::value, "");
34
35 struct raw_allocator_specialized
36 {
37 };
38
39 namespace foonathan
40 {
41 namespace memory
42 {
43 template <>
44 struct allocator_traits<raw_allocator_specialized>
45 {
46 };
47 } // namespace memory
48 } // namespace foonathan
49
50 static_assert(is_raw_allocator<raw_allocator_specialized>::value, "");
51
52 template <class Allocator, class Type, bool Stateful>
53 void test_type_statefulness()
54 {
55 using type = typename allocator_traits<Allocator>::allocator_type;
56 static_assert(std::is_same<type, Type>::value, "allocator_type failed");
57 using stateful = typename allocator_traits<Allocator>::is_stateful;
58 static_assert(stateful::value == Stateful, "is_stateful failed");
59 }
60
61 template <typename T>
62 struct standard_alloc
63 {
64 using value_type = T;
65 };
66
67 template <typename T, typename... Dummy>
68 struct standard_multi_arg
69 {
70 using value_type = T;
71 };
72
73 template <typename T, typename Dummy>
74 struct standard_with_rebind
75 {
76 using value_type = T;
77
78 template <typename U>
79 struct rebind
80 {
81 using other = standard_with_rebind<U, int>;
82 };
83 };
84
85 void instantiate_test_type_statefulness()
86 {
87 struct empty_raw
88 {
89 };
90 test_type_statefulness<empty_raw, empty_raw, false>();
91
92 struct non_empty_raw
93 {
94 int i;
95 };
96 test_type_statefulness<non_empty_raw, non_empty_raw, true>();
97
98 struct explicit_stateful_raw
99 {
100 using is_stateful = std::true_type;
101 };
102 (void)explicit_stateful_raw::is_stateful();
103 test_type_statefulness<explicit_stateful_raw, explicit_stateful_raw, true>();
104
105 struct explicit_stateless_raw
106 {
107 using is_stateful = std::false_type;
108 int i;
109 };
110 (void)explicit_stateless_raw::is_stateful();
111 test_type_statefulness<explicit_stateless_raw, explicit_stateless_raw, false>();
112
113 test_type_statefulness<standard_alloc<char>, standard_alloc<char>, false>();
114 test_type_statefulness<standard_alloc<int>, standard_alloc<char>, false>();
115
116 test_type_statefulness<standard_multi_arg<char, int, int>, standard_multi_arg<char, int, int>,
117 false>();
118 test_type_statefulness<standard_multi_arg<int, int, int>, standard_multi_arg<char, int, int>,
119 false>();
120
121 test_type_statefulness<standard_with_rebind<char, char>, standard_with_rebind<char, int>,
122 false>();
123 test_type_statefulness<standard_with_rebind<int, char>, standard_with_rebind<char, int>,
124 false>();
125 }
126
127 template <class Allocator>
128 void test_node(Allocator& alloc)
129 {
130 auto ptr = allocator_traits<Allocator>::allocate_node(alloc, 1, 1);
131 allocator_traits<Allocator>::deallocate_node(alloc, ptr, 1, 1);
132 }
133
134 template <class Allocator>
135 void test_array(Allocator& alloc)
136 {
137 auto ptr = allocator_traits<Allocator>::allocate_array(alloc, 1, 1, 1);
138 allocator_traits<Allocator>::deallocate_array(alloc, ptr, 1, 1, 1);
139 }
140
141 template <class Allocator>
142 void test_max_getter(const Allocator& alloc, std::size_t alignment, std::size_t node,
143 std::size_t array)
144 {
145 auto i = allocator_traits<Allocator>::max_alignment(alloc);
146 REQUIRE(i == alignment);
147 i = allocator_traits<Allocator>::max_node_size(alloc);
148 REQUIRE(i == node);
149 i = allocator_traits<Allocator>::max_array_size(alloc);
150 REQUIRE(i == array);
151 }
152
153 TEST_CASE("allocator_traits", "[core]")
154 {
155 struct min_raw_allocator
156 {
157 bool alloc_node = false, dealloc_node = false;
158
159 void* allocate_node(std::size_t, std::size_t)
160 {
161 alloc_node = true;
162 return nullptr;
163 }
164
165 void deallocate_node(void*, std::size_t, std::size_t) noexcept
166 {
167 dealloc_node = true;
168 }
169 };
170
171 static_assert(is_raw_allocator<min_raw_allocator>::value, "");
172
173 struct standard_allocator
174 {
175 using value_type = char;
176
177 bool alloc = false, dealloc = false;
178
179 char* allocate(std::size_t)
180 {
181 alloc = true;
182 return nullptr;
183 }
184
185 void deallocate(char*, std::size_t) noexcept
186 {
187 dealloc = true;
188 }
189 };
190
191 static_assert(is_raw_allocator<standard_allocator>::value, "");
192
193 SECTION("node")
194 {
195 // minimum interface works
196 min_raw_allocator min;
197 test_node(min);
198 REQUIRE(min.alloc_node);
199 REQUIRE(min.dealloc_node);
200
201 // standard interface works
202 standard_allocator std;
203 test_node(std);
204 REQUIRE(std.alloc);
205 REQUIRE(std.dealloc);
206
207 struct both_alloc : min_raw_allocator, standard_allocator
208 {
209 };
210
211 static_assert(is_raw_allocator<both_alloc>::value, "");
212
213 // raw is preferred over standard
214 both_alloc both;
215 test_node(both);
216 REQUIRE(both.alloc_node);
217 REQUIRE(both.dealloc_node);
218 REQUIRE(!both.alloc);
219 REQUIRE(!both.dealloc);
220 }
221 SECTION("array")
222 {
223 // minimum interface works
224 min_raw_allocator min;
225 test_array(min);
226 REQUIRE(min.alloc_node);
227 REQUIRE(min.dealloc_node);
228
229 // standard interface works
230 standard_allocator std;
231 test_array(std);
232 REQUIRE(std.alloc);
233 REQUIRE(std.dealloc);
234
235 struct array_raw
236 {
237 bool alloc_array = false, dealloc_array = false;
238
239 void* allocate_array(std::size_t, std::size_t, std::size_t)
240 {
241 alloc_array = true;
242 return nullptr;
243 }
244
245 void deallocate_array(void*, std::size_t, std::size_t, std::size_t) noexcept
246 {
247 dealloc_array = true;
248 }
249 };
250
251 // array works
252 array_raw array;
253 test_array(array);
254 REQUIRE(array.alloc_array);
255 REQUIRE(array.dealloc_array);
256
257 struct array_node : min_raw_allocator, array_raw
258 {
259 };
260 static_assert(is_raw_allocator<array_node>::value, "");
261
262 // array works over node
263 array_node array2;
264 test_array(array2);
265 REQUIRE(array2.alloc_array);
266 REQUIRE(array2.dealloc_array);
267 REQUIRE(!array2.alloc_node);
268 REQUIRE(!array2.dealloc_node);
269
270 struct array_std : standard_allocator, array_raw
271 {
272 };
273 // array works over standard
274 array_std array3;
275 test_array(array3);
276 REQUIRE(array3.alloc_array);
277 REQUIRE(array3.dealloc_array);
278 REQUIRE(!array3.alloc);
279 REQUIRE(!array3.dealloc);
280
281 struct array_node_std : standard_allocator, array_raw, min_raw_allocator
282 {
283 };
284 // array works over everything
285 array_node_std array4;
286 test_array(array4);
287 REQUIRE(array4.alloc_array);
288 REQUIRE(array4.dealloc_array);
289 REQUIRE(!array4.alloc_node);
290 REQUIRE(!array4.dealloc_node);
291 REQUIRE(!array4.alloc);
292 REQUIRE(!array4.dealloc);
293 }
294 SECTION("max getter")
295 {
296 min_raw_allocator min;
297 test_max_getter(min, detail::max_alignment, std::size_t(-1), std::size_t(-1));
298
299 struct with_alignment
300 {
301 std::size_t max_alignment() const
302 {
303 return detail::max_alignment * 2;
304 }
305 };
306 with_alignment alignment;
307 test_max_getter(alignment, detail::max_alignment * 2, std::size_t(-1), std::size_t(-1));
308
309 struct with_node
310 {
311 std::size_t max_node_size() const
312 {
313 return 1;
314 }
315 };
316 with_node node;
317 test_max_getter(node, detail::max_alignment, 1, 1);
318
319 struct with_array
320 {
321 std::size_t max_array_size() const
322 {
323 return 2;
324 }
325 };
326 with_array array;
327 test_max_getter(array, detail::max_alignment, std::size_t(-1), 2);
328
329 struct with_node_array : with_node, with_array
330 {
331 };
332 with_node_array node_array;
333 test_max_getter(node_array, detail::max_alignment, 1, 2);
334
335 struct with_everything : with_node_array, with_alignment
336 {
337 };
338 with_everything everything;
339 test_max_getter(everything, detail::max_alignment * 2, 1, 2);
340 }
341 }
342
343 template <class Allocator>
344 void test_try_node(Allocator& alloc)
345 {
346 auto ptr = composable_allocator_traits<Allocator>::try_allocate_node(alloc, 1, 1);
347 composable_allocator_traits<Allocator>::try_deallocate_node(alloc, ptr, 1, 1);
348 }
349
350 template <class Allocator>
351 void test_try_array(Allocator& alloc)
352 {
353 auto ptr = composable_allocator_traits<Allocator>::try_allocate_array(alloc, 1, 1, 1);
354 composable_allocator_traits<Allocator>::try_deallocate_array(alloc, ptr, 1, 1, 1);
355 }
356
357 TEST_CASE("composable_allocator_traits")
358 {
359 struct min_composable_allocator
360 {
361 bool alloc_node = false, dealloc_node = false;
362
363 void* allocate_node(std::size_t, std::size_t)
364 {
365 return nullptr;
366 }
367
368 void deallocate_node(void*, std::size_t, std::size_t) noexcept
369 {
370 }
371
372 void* try_allocate_node(std::size_t, std::size_t)
373 {
374 alloc_node = true;
375 return nullptr;
376 }
377
378 bool try_deallocate_node(void*, std::size_t, std::size_t) noexcept
379 {
380 dealloc_node = true;
381 return true;
382 }
383 };
384 static_assert(is_composable_allocator<min_composable_allocator>::value, "");
385
386 SECTION("node")
387 {
388 min_composable_allocator alloc;
389 test_try_node(alloc);
390 REQUIRE(alloc.alloc_node);
391 REQUIRE(alloc.dealloc_node);
392 }
393 SECTION("array")
394 {
395 min_composable_allocator min;
396 test_try_array(min);
397 REQUIRE(min.alloc_node);
398 REQUIRE(min.dealloc_node);
399
400 struct array_composable : min_composable_allocator
401 {
402 bool alloc_array = false, dealloc_array = false;
403
404 void* try_allocate_array(std::size_t, std::size_t, std::size_t)
405 {
406 alloc_array = true;
407 return nullptr;
408 }
409
410 bool try_deallocate_array(void*, std::size_t, std::size_t,
411 std::size_t) noexcept
412 {
413 dealloc_array = true;
414 return true;
415 }
416 } array;
417
418 test_try_array(array);
419 REQUIRE(array.alloc_array);
420 REQUIRE(array.dealloc_array);
421 REQUIRE(!array.alloc_node);
422 REQUIRE(!array.dealloc_node);
423 }
424 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_TEST_BENCHMARK_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_TEST_BENCHMARK_HPP_INCLUDED
6
7 // Benchmarking functions and allocator scenarios
8
9 #include <algorithm>
10 #include <chrono>
11 #include <random>
12 #include <vector>
13
14 #include "allocator_traits.hpp"
15
16 using unit = std::chrono::nanoseconds;
17
18 template <typename F, typename... Args>
19 std::size_t measure(F func, Args&&... args)
20 {
21 auto start = std::chrono::steady_clock::now(); // steady_clock: monotonic, suited for timing
22 func(std::forward<Args>(args)...);
23 auto duration = std::chrono::duration_cast<unit>(std::chrono::steady_clock::now() - start);
24 return std::size_t(duration.count());
25 }
26
27 std::size_t sample_size = 1024u;
28
29 template <typename F, typename Alloc, typename... Args>
30 std::size_t benchmark(F measure_func, Alloc make_alloc, Args&&... args)
31 {
32 auto min_time = std::size_t(-1);
33 for (std::size_t i = 0u; i != sample_size; ++i)
34 {
35 auto alloc = make_alloc();
36 auto time = measure_func(alloc, std::forward<Args>(args)...);
37 if (time < min_time)
38 min_time = time;
39 }
40 return min_time;
41 }
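// usage sketch (illustrative only; heap_allocator as a stand-in factory):
//
//   auto ns = benchmark(single(1024u),
//                       [] { return foonathan::memory::heap_allocator(); },
//                       16u);
//   // minimum time over sample_size runs of 1024 alloc/dealloc pairs of 16 bytes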
42
43 struct single
44 {
45 std::size_t count;
46
47 single(std::size_t c) : count(c)
48 {
49 }
50
51 template <class RawAllocator>
52 std::size_t operator()(RawAllocator& alloc, std::size_t size)
53 {
54 using namespace foonathan::memory;
55 return measure([&]() {
56 for (std::size_t i = 0u; i != count; ++i)
57 {
58 volatile auto ptr = allocator_traits<RawAllocator>::allocate_node(alloc, size, 1);
59 allocator_traits<RawAllocator>::deallocate_node(alloc, ptr, size, 1);
60 }
61 });
62 }
63
64 template <class RawAllocator>
65 std::size_t operator()(RawAllocator& alloc, std::size_t array_size, std::size_t node_size)
66 {
67 return measure([&]() {
68 for (std::size_t i = 0u; i != count; ++i)
69 {
70 auto ptr =
71 allocator_traits<RawAllocator>::allocate_array(alloc, array_size, node_size, 1);
72 allocator_traits<RawAllocator>::deallocate_array(alloc, ptr, array_size, node_size,
73 1);
74 }
75 });
76 }
77
78 static const char* name()
79 {
80 return "single";
81 }
82 };
83
84 struct basic_bulk
85 {
86 using order_func = void (*)(std::vector<void*>&);
87
88 order_func func;
89 std::size_t count;
90
91 basic_bulk(order_func f, std::size_t c) : func(f), count(c)
92 {
93 }
94
95 template <class RawAllocator>
96 std::size_t operator()(RawAllocator& alloc, std::size_t node_size)
97 {
98 using namespace foonathan::memory;
99
100 std::vector<void*> ptrs;
101 ptrs.reserve(count);
102
103 auto alloc_t = measure([&]() {
104 for (std::size_t i = 0u; i != count; ++i)
105 ptrs.push_back(allocator_traits<RawAllocator>::allocate_node(alloc, node_size, 1));
106 });
107 func(ptrs);
108 auto dealloc_t = measure([&]() {
109 for (auto ptr : ptrs)
110 allocator_traits<RawAllocator>::deallocate_node(alloc, ptr, node_size, 1);
111 });
112 return alloc_t + dealloc_t;
113 }
114
115 template <class RawAllocator>
116 std::size_t operator()(RawAllocator& alloc, std::size_t array_size, std::size_t node_size)
117 {
118 using namespace foonathan::memory;
119
120 std::vector<void*> ptrs;
121 ptrs.reserve(count);
122
123 auto alloc_t = measure([&]() {
124 for (std::size_t i = 0u; i != count; ++i)
125 ptrs.push_back(allocator_traits<RawAllocator>::allocate_array(alloc, array_size,
126 node_size, 1));
127 });
128 func(ptrs);
129 auto dealloc_t = measure([&]() {
130 for (auto ptr : ptrs)
131 allocator_traits<RawAllocator>::deallocate_array(alloc, ptr, array_size, node_size,
132 1);
133 });
134 return alloc_t + dealloc_t;
135 }
136 };
137
138 struct bulk : basic_bulk
139 {
140 bulk(std::size_t c) : basic_bulk([](std::vector<void*>&) {}, c)
141 {
142 }
143
144 static const char* name()
145 {
146 return "bulk";
147 }
148 };
149
150 struct bulk_reversed : basic_bulk
151 {
152 bulk_reversed(std::size_t c)
153 : basic_bulk([](std::vector<void*>& ptrs) { std::reverse(ptrs.begin(), ptrs.end()); }, c)
154 {
155 }
156
157 static const char* name()
158 {
159 return "bulk_reversed";
160 }
161 };
162
163 struct butterfly : basic_bulk
164 {
165 butterfly(std::size_t c)
166 : basic_bulk([](std::vector<void*>&
167 ptrs) { std::shuffle(ptrs.begin(), ptrs.end(), std::mt19937{}); },
168 c)
169 {
170 }
171
172 static const char* name()
173 {
174 return "butterfly\n";
175 }
176 };
177
178 #endif // FOONATHAN_MEMORY_TEST_BENCHMARK_HPP_INCLUDED
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 // tests all possible default allocator classes
5
6 #include "default_allocator.hpp"
7
8 #include <catch.hpp>
9
10 #include "detail/align.hpp"
11
12 using namespace foonathan::memory;
13
14 // *very* simple test case that ensures proper alignment and might catch some segfaults
15 template <class Allocator>
16 void check_default_allocator(Allocator& alloc, std::size_t def_alignment = detail::max_alignment)
17 {
18 auto ptr = alloc.allocate_node(1, 1);
19 REQUIRE(detail::is_aligned(ptr, def_alignment));
20
21 alloc.deallocate_node(ptr, 1, 1);
22
23 for (std::size_t i = 0u; i != 10u; ++i)
24 {
25 auto node = alloc.allocate_node(i, 1);
26 REQUIRE(detail::is_aligned(node, def_alignment));
27 alloc.deallocate_node(node, i, 1);
28 }
29
30 std::vector<void*> nodes;
31 for (std::size_t i = 0u; i != 10u; ++i)
32 {
33 auto node = alloc.allocate_node(i, 1);
34 REQUIRE(detail::is_aligned(node, def_alignment));
35 nodes.push_back(node);
36 }
37
38 for (std::size_t i = 0u; i != 10u; ++i)
39 alloc.deallocate_node(nodes[i], i, 1);
40 }
41
42 TEST_CASE("heap_allocator", "[default_allocator]")
43 {
44 heap_allocator alloc;
45 check_default_allocator(alloc);
46 }
47
48 TEST_CASE("new_allocator", "[default_allocator]")
49 {
50 new_allocator alloc;
51 check_default_allocator(alloc);
52 }
53
54 TEST_CASE("malloc_allocator", "[default_allocator]")
55 {
56 malloc_allocator alloc;
57 check_default_allocator(alloc);
58 }
59
60 TEST_CASE("static_allocator", "[default_allocator]")
61 {
62 static_allocator_storage<1024> storage;
63 static_allocator alloc(storage);
64
65 // no need to test alignment issues here again, implemented by fixed_memory_stack
66 check_default_allocator(alloc, 1);
67 }
68
69 TEST_CASE("virtual_memory_allocator", "[default_allocator]")
70 {
71 virtual_memory_allocator alloc;
72 check_default_allocator(alloc, virtual_memory_page_size);
73 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/align.hpp"
5
6 #include <catch.hpp>
7
8 using namespace foonathan::memory;
9 using namespace detail;
10
11 TEST_CASE("detail::align_offset", "[detail][core]")
12 {
13 auto ptr = reinterpret_cast<void*>(0);
14 REQUIRE(align_offset(ptr, 1) == 0u);
15 REQUIRE(align_offset(ptr, 16) == 0u);
16 ptr = reinterpret_cast<void*>(1);
17 REQUIRE(align_offset(ptr, 1) == 0u);
18 REQUIRE(align_offset(ptr, 16) == 15u);
19 ptr = reinterpret_cast<void*>(8);
20 REQUIRE(align_offset(ptr, 4) == 0u);
21 REQUIRE(align_offset(ptr, 8) == 0u);
22 REQUIRE(align_offset(ptr, 16) == 8u);
23 ptr = reinterpret_cast<void*>(16);
24 REQUIRE(align_offset(ptr, 16) == 0u);
25 ptr = reinterpret_cast<void*>(1025);
26 REQUIRE(align_offset(ptr, 16) == 15u);
27 }
28
29 TEST_CASE("detail::is_aligned", "[detail][core]")
30 {
31 auto ptr = reinterpret_cast<void*>(0);
32 REQUIRE(is_aligned(ptr, 1));
33 REQUIRE(is_aligned(ptr, 8));
34 REQUIRE(is_aligned(ptr, 16));
35 ptr = reinterpret_cast<void*>(1);
36 REQUIRE(is_aligned(ptr, 1));
37 REQUIRE(!is_aligned(ptr, 16));
38 ptr = reinterpret_cast<void*>(8);
39 REQUIRE(is_aligned(ptr, 1));
40 REQUIRE(is_aligned(ptr, 4));
41 REQUIRE(is_aligned(ptr, 8));
42 REQUIRE(!is_aligned(ptr, 16));
43 ptr = reinterpret_cast<void*>(16);
44 REQUIRE(is_aligned(ptr, 1));
45 REQUIRE(is_aligned(ptr, 8));
46 REQUIRE(is_aligned(ptr, 16));
47 ptr = reinterpret_cast<void*>(1025);
48 REQUIRE(is_aligned(ptr, 1));
49 REQUIRE(!is_aligned(ptr, 16));
50 }
51
52 TEST_CASE("detail::alignment_for", "[detail][core]")
53 {
54 static_assert(max_alignment >= 8, "test case not working");
55 REQUIRE(alignment_for(1) == 1);
56 REQUIRE(alignment_for(2) == 2);
57 REQUIRE(alignment_for(3) == 2);
58 REQUIRE(alignment_for(4) == 4);
59 REQUIRE(alignment_for(5) == 4);
60 REQUIRE(alignment_for(6) == 4);
61 REQUIRE(alignment_for(7) == 4);
62 REQUIRE(alignment_for(8) == 8);
63 REQUIRE(alignment_for(9) == 8);
64 REQUIRE(alignment_for(100) == max_alignment);
65 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/debug_helpers.hpp"
5
6 #include <catch.hpp>
7
8 #include "debugging.hpp"
9
10 using namespace foonathan::memory;
11 using namespace detail;
12
13 TEST_CASE("detail::debug_fill", "[detail][core]")
14 {
15 debug_magic array[10];
16 for (auto& el : array)
17 el = debug_magic::freed_memory;
18
19 debug_fill(array, sizeof(array), debug_magic::new_memory);
20 #if FOONATHAN_MEMORY_DEBUG_FILL
21 for (auto el : array)
22 REQUIRE(el == debug_magic::new_memory);
23 #else
24 for (auto el : array)
25 REQUIRE(el == debug_magic::freed_memory);
26 #endif
27 }
28
29 TEST_CASE("detail::debug_is_filled", "[detail][core]")
30 {
31 debug_magic array[10];
32 for (auto& el : array)
33 el = debug_magic::freed_memory;
34
35 REQUIRE(debug_is_filled(array, sizeof(array), debug_magic::freed_memory) == nullptr);
36
37 array[5] = debug_magic::new_memory;
38 auto ptr =
39 static_cast<debug_magic*>(debug_is_filled(array, sizeof(array), debug_magic::freed_memory));
40 #if FOONATHAN_MEMORY_DEBUG_FILL
41 REQUIRE(ptr == array + 5);
42 #else
43 REQUIRE(ptr == nullptr);
44 #endif
45 }
46
47 TEST_CASE("detail::debug_fill_new/free", "[detail][core]")
48 {
49 debug_magic array[10];
50
51 auto result = debug_fill_new(array, 8 * sizeof(debug_magic), sizeof(debug_magic));
52 auto offset = static_cast<debug_magic*>(result) - array;
53 auto expected_offset = debug_fence_size ? sizeof(debug_magic) : 0u;
54 REQUIRE(offset == expected_offset);
55
56 #if FOONATHAN_MEMORY_DEBUG_FILL
57 #if FOONATHAN_MEMORY_DEBUG_FENCE
58 REQUIRE(array[0] == debug_magic::fence_memory);
59 REQUIRE(array[9] == debug_magic::fence_memory);
60 const auto start = 1;
61 #else
62 const auto start = 0;
63 #endif
64 for (auto i = start; i < start + 8; ++i)
65 REQUIRE(array[i] == debug_magic::new_memory);
66 #endif
67
68 result = debug_fill_free(result, 8 * sizeof(debug_magic), sizeof(debug_magic));
69 REQUIRE(static_cast<debug_magic*>(result) == array);
70
71 #if FOONATHAN_MEMORY_DEBUG_FILL
72 #if FOONATHAN_MEMORY_DEBUG_FENCE
73 REQUIRE(array[0] == debug_magic::fence_memory);
74 REQUIRE(array[9] == debug_magic::fence_memory);
75 #endif
76 for (auto i = start; i < start + 8; ++i)
77 REQUIRE(array[i] == debug_magic::freed_memory);
78 #endif
79 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/free_list.hpp"
5 #include "detail/small_free_list.hpp"
6
7 #include <algorithm>
8 #include <catch.hpp>
9 #include <random>
10 #include <vector>
11
12 #include "detail/align.hpp"
13 #include "static_allocator.hpp"
14
15 using namespace foonathan::memory;
16 using namespace detail;
17
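// Helper: allocates every node the list can provide, checks alignment, then deallocates them in random order.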
18 template <class FreeList>
19 void use_list_node(FreeList& list)
20 {
21 std::vector<void*> ptrs;
22 auto capacity = list.capacity();
23 for (std::size_t i = 0u; i != capacity; ++i)
24 {
25 auto ptr = list.allocate();
26 REQUIRE(ptr);
27 REQUIRE(is_aligned(ptr, list.alignment()));
28 ptrs.push_back(ptr);
29 }
30 REQUIRE(list.capacity() == 0u);
31 REQUIRE(list.empty());
32
33 std::shuffle(ptrs.begin(), ptrs.end(), std::mt19937{});
34
35 for (auto p : ptrs)
36 list.deallocate(p);
37 REQUIRE(list.capacity() == capacity);
38 REQUIRE(!list.empty());
39 }
40
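// Helper: inserts a memory block and verifies the capacity bookkeeping across an allocate/deallocate round trip.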
41 template <class FreeList>
42 void check_list(FreeList& list, void* memory, std::size_t size)
43 {
44 auto old_cap = list.capacity();
45
46 list.insert(memory, size);
47 REQUIRE(!list.empty());
48 REQUIRE(list.capacity() <= old_cap + size / list.node_size());
49
50 old_cap = list.capacity();
51
52 auto node = list.allocate();
53 REQUIRE(node);
54 REQUIRE(is_aligned(node, list.alignment()));
55 REQUIRE(list.capacity() == old_cap - 1);
56
57 list.deallocate(node);
58 REQUIRE(list.capacity() == old_cap);
59
60 use_list_node(list);
61 }
62
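// Helper: verifies that moving a free list transfers its capacity and leaves the moved-from list empty but reusable.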
63 template <class FreeList>
64 void check_move(FreeList& list)
65 {
66 static_allocator_storage<1024> memory;
67 list.insert(&memory, 1024);
68
69 auto ptr = list.allocate();
70 REQUIRE(ptr);
71 REQUIRE(is_aligned(ptr, list.alignment()));
72 auto capacity = list.capacity();
73
74 auto list2 = detail::move(list);
75 REQUIRE(list.empty());
76 REQUIRE(list.capacity() == 0u);
77 REQUIRE(!list2.empty());
78 REQUIRE(list2.capacity() == capacity);
79
80 list2.deallocate(ptr);
81
82 static_allocator_storage<1024> memory2;
83 list.insert(&memory2, 1024);
84 REQUIRE(!list.empty());
85 REQUIRE(list.capacity() <= 1024 / list.node_size());
86
87 ptr = list.allocate();
88 REQUIRE(ptr);
89 REQUIRE(is_aligned(ptr, list.alignment()));
90 list.deallocate(ptr);
91
92 ptr = list2.allocate();
93
94 list = detail::move(list2);
95 REQUIRE(list2.empty());
96 REQUIRE(list2.capacity() == 0u);
97 REQUIRE(!list.empty());
98 REQUIRE(list.capacity() == capacity);
99
100 list.deallocate(ptr);
101 }
102
103 TEST_CASE("free_memory_list", "[detail][pool]")
104 {
105 free_memory_list list(4);
106 REQUIRE(list.empty());
107 REQUIRE(list.node_size() >= 4);
108 REQUIRE(list.capacity() == 0u);
109
110 SECTION("normal insert")
111 {
112 static_allocator_storage<1024> memory;
113 check_list(list, &memory, 1024);
114
115 check_move(list);
116 }
117 SECTION("uneven insert")
118 {
119 static_allocator_storage<1023> memory; // deliberately not divisible by the node size
120 check_list(list, &memory, 1023);
121
122 check_move(list);
123 }
124 SECTION("multiple insert")
125 {
126 static_allocator_storage<1024> a;
127 static_allocator_storage<100> b;
128 static_allocator_storage<1337> c;
129 check_list(list, &a, 1024);
130 check_list(list, &b, 100);
131 check_list(list, &c, 1337);
132
133 check_move(list);
134 }
135 }
136
137 void use_list_array(ordered_free_memory_list& list)
138 {
139 // just hoping to catch segfaults
140
141 auto array = list.allocate(3 * list.node_size());
142 REQUIRE(array);
143 REQUIRE(is_aligned(array, list.alignment()));
144 auto array2 = list.allocate(2 * 3);
145 REQUIRE(array2);
146 REQUIRE(is_aligned(array2, list.alignment()));
147 auto node = list.allocate();
148 REQUIRE(node);
149 REQUIRE(is_aligned(node, list.alignment()));
150
151 list.deallocate(array2, 2 * 3);
152 list.deallocate(node);
153
154 array2 = list.allocate(4 * 10);
155 REQUIRE(array2);
156 REQUIRE(is_aligned(array2, list.alignment()));
157
158 list.deallocate(array, 3 * list.node_size());
159
160 node = list.allocate();
161 REQUIRE(node);
162 REQUIRE(is_aligned(node, list.alignment()));
163 list.deallocate(node);
164
165 list.deallocate(array2, 4 * 10);
166 }
167
168 TEST_CASE("ordered_free_memory_list", "[detail][pool]")
169 {
170 ordered_free_memory_list list(4);
171 REQUIRE(list.empty());
172 REQUIRE(list.node_size() >= 4);
173 REQUIRE(list.capacity() == 0u);
174
175 SECTION("normal insert")
176 {
177 static_allocator_storage<1024> memory;
178 check_list(list, &memory, 1024);
179 use_list_array(list);
180
181 check_move(list);
182 }
183 SECTION("uneven insert")
184 {
185 static_allocator_storage<1023> memory; // deliberately not divisible by the node size
186 check_list(list, &memory, 1023);
187 use_list_array(list);
188
189 check_move(list);
190 }
191 SECTION("multiple insert")
192 {
193 static_allocator_storage<1024> a;
194 static_allocator_storage<100> b;
195 static_allocator_storage<1337> c;
196 check_list(list, &a, 1024);
197 use_list_array(list);
198 check_list(list, &b, 100);
199 use_list_array(list);
200 check_list(list, &c, 1337);
201 use_list_array(list);
202
203 check_move(list);
204 }
205 }
206
207 TEST_CASE("small_free_memory_list", "[detail][pool]")
208 {
209 small_free_memory_list list(4);
210 REQUIRE(list.empty());
211 REQUIRE(list.node_size() == 4);
212 REQUIRE(list.capacity() == 0u);
213
214 SECTION("normal insert")
215 {
216 static_allocator_storage<1024> memory;
217 check_list(list, &memory, 1024);
218
219 check_move(list);
220 }
221 SECTION("uneven insert")
222 {
223 static_allocator_storage<1023> memory; // deliberately not divisible by the node size
224 check_list(list, &memory, 1023);
225
226 check_move(list);
227 }
228 SECTION("big insert")
229 {
230 static_allocator_storage<4096> memory; // should use multiple chunks
231 check_list(list, &memory, 4096);
232
233 check_move(list);
234 }
235 SECTION("multiple insert")
236 {
237 static_allocator_storage<1024> a;
238 static_allocator_storage<100> b;
239 static_allocator_storage<1337> c;
240 check_list(list, &a, 1024);
241 check_list(list, &b, 100);
242 check_list(list, &c, 1337);
243
244 check_move(list);
245 }
246 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/free_list_array.hpp"
5
6 #include <catch.hpp>
7
8 #include "detail/free_list.hpp"
9 #include "detail/small_free_list.hpp"
10 #include "static_allocator.hpp"
11
12 using namespace foonathan::memory;
13 using namespace detail;
14
15 TEST_CASE("detail::log2_access_policy", "[detail][pool]")
16 {
17 using ap = detail::log2_access_policy;
18 REQUIRE(ap::index_from_size(1) == 0u);
19 REQUIRE(ap::index_from_size(2) == 1u);
20 REQUIRE(ap::index_from_size(3) == 2u);
21 REQUIRE(ap::index_from_size(4) == 2u);
22 REQUIRE(ap::index_from_size(5) == 3u);
23 REQUIRE(ap::index_from_size(6) == 3u);
24 REQUIRE(ap::index_from_size(8) == 3u);
25 REQUIRE(ap::index_from_size(9) == 4u);
26
27 REQUIRE(ap::size_from_index(0) == 1u);
28 REQUIRE(ap::size_from_index(1) == 2u);
29 REQUIRE(ap::size_from_index(2) == 4u);
30 REQUIRE(ap::size_from_index(3) == 8u);
31 }
32
33 TEST_CASE("detail::free_list_array", "[detail][pool]")
34 {
35 static_allocator_storage<1024> memory;
36 detail::fixed_memory_stack stack(&memory);
37 SECTION("power of two max size, small list")
38 {
39 using array =
40 detail::free_list_array<detail::small_free_memory_list, detail::log2_access_policy>;
41 array arr(stack, stack.top() + 1024, 16);
42 REQUIRE(arr.max_node_size() == 16u);
43 REQUIRE(arr.size() == 5u);
44
45 REQUIRE(arr.get(1u).node_size() == 1u);
46 REQUIRE(arr.get(2u).node_size() == 2u);
47 REQUIRE(arr.get(3u).node_size() == 4u);
48 REQUIRE(arr.get(4u).node_size() == 4u);
49 REQUIRE(arr.get(5u).node_size() == 8u);
50 REQUIRE(arr.get(9u).node_size() == 16u);
51 REQUIRE(arr.get(16u).node_size() == 16u);
52 }
53 SECTION("non power of two max size, small list")
54 {
55 using array =
56 detail::free_list_array<detail::small_free_memory_list, detail::log2_access_policy>;
57 array arr(stack, stack.top() + 1024, 15);
58 REQUIRE(arr.max_node_size() == 16u);
59 REQUIRE(arr.size() == 5u);
60
61 REQUIRE(arr.get(1u).node_size() == 1u);
62 REQUIRE(arr.get(2u).node_size() == 2u);
63 REQUIRE(arr.get(3u).node_size() == 4u);
64 REQUIRE(arr.get(4u).node_size() == 4u);
65 REQUIRE(arr.get(5u).node_size() == 8u);
66 REQUIRE(arr.get(9u).node_size() == 16u);
67 REQUIRE(arr.get(15u).node_size() == 16u);
68 }
69 SECTION("non power of two max size, normal list")
70 {
71 using array = detail::free_list_array<detail::free_memory_list, detail::log2_access_policy>;
72 array arr(stack, stack.top() + 1024, 15);
73 REQUIRE(arr.max_node_size() == 16u);
74 REQUIRE(arr.size() <= 5u);
75
76 REQUIRE(arr.get(1u).node_size() == detail::free_memory_list::min_element_size);
77 REQUIRE(arr.get(2u).node_size() == detail::free_memory_list::min_element_size);
78 REQUIRE(arr.get(9u).node_size() == 16u);
79 REQUIRE(arr.get(15u).node_size() == 16u);
80 }
81 }
0 // Copyright (C) 2016-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/ilog2.hpp"
5
6 #include <catch.hpp>
7
8 using namespace foonathan::memory;
9 using namespace detail;
10
11 TEST_CASE("detail::ilog2()", "[detail][core]")
12 {
13 // Check everything up to 2^16.
14 for (std::size_t i = 0; i != 16; ++i)
15 {
16 auto power = 1u << i;
17 auto next_power = 2 * power;
18 for (auto x = power; x != next_power; ++x)
19 CHECK(ilog2(x) == i);
20 }
21
22 // Check some higher values.
23 CHECK(ilog2(std::uint64_t(1) << 32) == 32);
24 CHECK(ilog2((std::uint64_t(1) << 32) + 44) == 32);
25 CHECK(ilog2((std::uint64_t(1) << 32) + 2048) == 32);
26
27 CHECK(ilog2(std::uint64_t(1) << 48) == 48);
28 CHECK(ilog2((std::uint64_t(1) << 48) + 44) == 48);
29 CHECK(ilog2((std::uint64_t(1) << 48) + 2048) == 48);
30
31 CHECK(ilog2(std::uint64_t(1) << 63) == 63);
32 CHECK(ilog2((std::uint64_t(1) << 63) + 44) == 63);
33 CHECK(ilog2((std::uint64_t(1) << 63) + 2063) == 63);
34 }
35
36 TEST_CASE("detail::ilog2_ceil()", "[detail][core]")
37 {
38 // Check everything up to 2^16.
39 for (std::size_t i = 0; i != 16; ++i)
40 {
41 auto power = 1u << i;
42 CHECK(ilog2_ceil(power) == i);
43
44 auto next_power = 2 * power;
45 for (auto x = power + 1; x != next_power; ++x)
46 CHECK(ilog2_ceil(x) == i + 1);
47 }
48
49 // Check some higher values.
50 CHECK(ilog2_ceil(std::uint64_t(1) << 32) == 32);
51 CHECK(ilog2_ceil((std::uint64_t(1) << 32) + 44) == 33);
52 CHECK(ilog2_ceil((std::uint64_t(1) << 32) + 2048) == 33);
53
54 CHECK(ilog2_ceil(std::uint64_t(1) << 48) == 48);
55 CHECK(ilog2_ceil((std::uint64_t(1) << 48) + 44) == 49);
56 CHECK(ilog2_ceil((std::uint64_t(1) << 48) + 2048) == 49);
57
58 CHECK(ilog2_ceil(std::uint64_t(1) << 63) == 63);
59 CHECK(ilog2_ceil((std::uint64_t(1) << 63) + 44) == 64);
60 CHECK(ilog2_ceil((std::uint64_t(1) << 63) + 2063) == 64);
61 }
62
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "detail/memory_stack.hpp"
5
6 #include <catch.hpp>
7
8 #include "detail/align.hpp"
9 #include "detail/debug_helpers.hpp"
10 #include "detail/utility.hpp"
11 #include "static_allocator.hpp"
12
13 using namespace foonathan::memory;
14 using namespace detail;
15
16 TEST_CASE("detail::fixed_memory_stack", "[detail][stack]")
17 {
18 fixed_memory_stack stack;
19 REQUIRE(stack.top() == nullptr);
20
21 SECTION("allocate")
22 {
23 static_allocator_storage<1024> memory;
24 stack = fixed_memory_stack{&memory};
25 auto end = stack.top() + 1024;
26
27 REQUIRE(stack.top() == reinterpret_cast<char*>(&memory));
28
29 SECTION("alignment for allocate")
30 {
31 auto ptr = stack.allocate(end, 13, 1u);
32 REQUIRE(ptr);
33 REQUIRE(is_aligned(ptr, 1u));
34
35 ptr = stack.allocate(end, 10, 2u);
36 REQUIRE(ptr);
37 REQUIRE(is_aligned(ptr, 2u));
38
39 ptr = stack.allocate(end, 10, max_alignment);
40 REQUIRE(ptr);
41 REQUIRE(is_aligned(ptr, max_alignment));
42
43 ptr = stack.allocate(end, 10, 2 * max_alignment);
44 REQUIRE(ptr);
45 REQUIRE(is_aligned(ptr, 2 * max_alignment));
46 }
47 SECTION("allocate/unwind")
48 {
49 REQUIRE(stack.allocate(end, 10u, 1u));
50 auto diff = std::size_t(stack.top() - reinterpret_cast<char*>(&memory));
51 REQUIRE(diff == 2 * debug_fence_size + 10u);
52
53 REQUIRE(stack.allocate(end, 16u, 1u));
54 auto diff2 = std::size_t(stack.top() - reinterpret_cast<char*>(&memory));
55 REQUIRE(diff2 == 2 * debug_fence_size + 16u + diff);
56
57 stack.unwind(reinterpret_cast<char*>(&memory) + diff);
58 REQUIRE(stack.top() == reinterpret_cast<char*>(&memory) + diff);
59
60 auto top = stack.top();
61 REQUIRE(!stack.allocate(end, 1024, 1));
62 REQUIRE(stack.top() == top);
63 }
64 }
65 SECTION("move")
66 {
67 static_allocator_storage<1024> memory;
68 auto end = reinterpret_cast<char*>(&memory) + 1024;
69
70 fixed_memory_stack other(reinterpret_cast<char*>(&memory));
71 REQUIRE(other.top() == reinterpret_cast<char*>(&memory));
72
73 stack = detail::move(other);
74 REQUIRE(stack.top() == reinterpret_cast<char*>(&memory));
75
76 REQUIRE(!other.allocate(end, 10, 1));
77 REQUIRE(stack.allocate(end, 10, 1));
78 auto top = stack.top();
79
80 other = detail::move(stack);
81 REQUIRE(other.top() == top);
82 REQUIRE(!stack.allocate(end, 10, 1));
83 REQUIRE(other.allocate(end, 10, 1));
84 }
85 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "fallback_allocator.hpp"
5
6 #include <catch.hpp>
7
8 #include "allocator_storage.hpp"
9 #include "test_allocator.hpp"
10
11 using namespace foonathan::memory;
12
13 TEST_CASE("fallback_allocator", "[adapter]")
14 {
15 struct test_compositioning : test_allocator
16 {
17 bool fail = false;
18
19 void* try_allocate_node(std::size_t size, std::size_t alignment)
20 {
21 return fail ? nullptr : allocate_node(size, alignment);
22 }
23
24 bool try_deallocate_node(void* ptr, std::size_t size, std::size_t alignment)
25 {
26 if (fail)
27 return false;
28 deallocate_node(ptr, size, alignment);
29 return true;
30 }
31 } default_alloc;
32 test_allocator fallback_alloc;
33
34 using allocator = fallback_allocator<allocator_reference<test_compositioning>,
35 allocator_reference<test_allocator>>;
36
37 allocator alloc(default_alloc, fallback_alloc);
38 REQUIRE(default_alloc.no_allocated() == 0u);
39 REQUIRE(fallback_alloc.no_allocated() == 0u);
40
41 auto ptr = alloc.allocate_node(1, 1);
42 REQUIRE(default_alloc.no_allocated() == 1u);
43 REQUIRE(fallback_alloc.no_allocated() == 0u);
44
45 alloc.deallocate_node(ptr, 1, 1);
46 REQUIRE(default_alloc.no_deallocated() == 1u);
47 REQUIRE(fallback_alloc.no_deallocated() == 0u);
48
49 default_alloc.fail = true;
50
51 ptr = alloc.allocate_node(1, 1);
52 REQUIRE(default_alloc.no_allocated() == 0u);
53 REQUIRE(fallback_alloc.no_allocated() == 1u);
54
55 alloc.deallocate_node(ptr, 1, 1);
56 REQUIRE(default_alloc.no_deallocated() == 1u);
57 REQUIRE(fallback_alloc.no_deallocated() == 1u);
58 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "iteration_allocator.hpp"
5
6 #include <catch.hpp>
7
8 #include "allocator_storage.hpp"
9 #include "test_allocator.hpp"
10
11 using namespace foonathan::memory;
12
13 TEST_CASE("iteration_allocator", "[stack]")
14 {
15 SECTION("basic")
16 {
17 test_allocator alloc;
18 iteration_allocator<2, allocator_reference<test_allocator>> iter_alloc(100, alloc);
19 REQUIRE(alloc.no_allocated() == 1u);
20 REQUIRE(iter_alloc.max_iterations() == 2u);
21 REQUIRE(iter_alloc.cur_iteration() == 0u);
22 REQUIRE(iter_alloc.capacity_left(0u) == 50);
23 REQUIRE(iter_alloc.capacity_left(1u) == 50);
24
25 iter_alloc.allocate(10, 1);
26 REQUIRE(iter_alloc.capacity_left() < 50);
27 iter_alloc.allocate(4, 4);
28 REQUIRE(iter_alloc.capacity_left() < 50);
29
30 REQUIRE(iter_alloc.capacity_left(1u) == 50);
31 iter_alloc.next_iteration();
32 REQUIRE(iter_alloc.cur_iteration() == 1u);
33 REQUIRE(iter_alloc.capacity_left() == 50);
34 REQUIRE(iter_alloc.capacity_left(0u) < 50);
35
36 iter_alloc.allocate(10, 1);
37 REQUIRE(iter_alloc.capacity_left() < 50);
38
39 iter_alloc.next_iteration();
40 REQUIRE(iter_alloc.cur_iteration() == 0u);
41 REQUIRE(iter_alloc.capacity_left() == 50);
42 REQUIRE(iter_alloc.capacity_left(1u) < 50);
43
44 iter_alloc.next_iteration();
45 REQUIRE(iter_alloc.cur_iteration() == 1u);
46 REQUIRE(iter_alloc.capacity_left() == 50);
47 }
48 SECTION("overaligned")
49 {
50 test_allocator alloc;
51 iteration_allocator<1, allocator_reference<test_allocator>> iter_alloc(100, alloc);
52
53 auto align = 2 * detail::max_alignment;
54 auto mem = iter_alloc.allocate(align, align);
55 REQUIRE(detail::is_aligned(mem, align));
56 }
57 }
0 // Copyright (C) 2016 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "joint_allocator.hpp"
5
6 #include <catch.hpp>
7
8 #include "container.hpp"
9 #include "test_allocator.hpp"
10
11 using namespace foonathan::memory;
12
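// Checks the observable state of a non-null joint_ptr: all pointer accessors agree, and allocator and value match.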
13 template <typename T, class RawAllocator>
14 void verify(const joint_ptr<T, RawAllocator>& ptr, const RawAllocator& alloc, int value)
15 {
16 REQUIRE(ptr);
17 REQUIRE(ptr.get());
18 REQUIRE(ptr.get() == ptr.operator->());
19 REQUIRE(ptr.get() == &*ptr);
20 REQUIRE(&ptr.get_allocator() == &alloc);
21 REQUIRE(ptr->value == value);
22 }
23
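// Checks the observable state of a null joint_ptr.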
24 template <typename T, class RawAllocator>
25 void verify_null(const joint_ptr<T, RawAllocator>& ptr, const RawAllocator& alloc)
26 {
27 REQUIRE(!ptr);
28 REQUIRE(ptr.get() == nullptr);
29 REQUIRE(&ptr.get_allocator() == &alloc);
30 }
31
32 TEST_CASE("joint_ptr", "[allocator]")
33 {
34 struct joint_test : joint_type<joint_test>
35 {
36 int value;
37
38 joint_test(joint tag, int v) : joint_type(tag), value(v) {}
39
40 joint_test(joint tag, const joint_test& other) : joint_type(tag), value(other.value) {}
41 };
42
43 test_allocator alloc;
44
45 SECTION("allocator constructor")
46 {
47 joint_ptr<joint_test, test_allocator> ptr(alloc);
48 verify_null(ptr, alloc);
49
50 REQUIRE(alloc.no_allocated() == 0u);
51 REQUIRE(alloc.no_deallocated() == 0u);
52 }
53 SECTION("creation constructor")
54 {
55 joint_ptr<joint_test, test_allocator> ptr(alloc, joint_size(10u), 5);
56 verify(ptr, alloc, 5);
57
58 REQUIRE(alloc.no_allocated() == 1u);
59 REQUIRE(alloc.last_allocated().size == sizeof(joint_test) + 10u);
60 REQUIRE(alloc.last_allocated().alignment == alignof(joint_test));
61 REQUIRE(alloc.no_deallocated() == 0u);
62 }
63 SECTION("move constructor")
64 {
65 auto ptr1 = allocate_joint<joint_test>(alloc, joint_size(10u), 5);
66 verify(ptr1, alloc, 5);
67
68 auto ptr2 = std::move(ptr1);
69 verify_null(ptr1, alloc);
70 verify(ptr2, alloc, 5);
71
72 auto ptr3 = std::move(ptr1);
73 verify_null(ptr1, alloc);
74
75 REQUIRE(alloc.no_allocated() == 1u);
76 }
77 SECTION("move assignment")
78 {
79 joint_ptr<joint_test, test_allocator> ptr1(alloc);
80 verify_null(ptr1, alloc);
81
82 auto ptr2 = allocate_joint<joint_test>(alloc, joint_size(10u), 5);
83 verify(ptr2, alloc, 5);
84 REQUIRE(alloc.no_allocated() == 1u);
85
86 ptr1 = std::move(ptr2);
87 verify_null(ptr2, alloc);
88 verify(ptr1, alloc, 5);
89 REQUIRE(alloc.no_allocated() == 1u);
90
91 ptr1 = std::move(ptr2);
92 verify_null(ptr1, alloc);
93 verify_null(ptr2, alloc);
94 REQUIRE(alloc.no_allocated() == 0u);
95 }
96 SECTION("swap")
97 {
98 joint_ptr<joint_test, test_allocator> ptr1(alloc);
99 verify_null(ptr1, alloc);
100
101 auto ptr2 = allocate_joint<joint_test>(alloc, joint_size(10u), 5);
102 verify(ptr2, alloc, 5);
103
104 swap(ptr1, ptr2);
105 verify(ptr1, alloc, 5);
106 verify_null(ptr2, alloc);
107
108 REQUIRE(alloc.no_allocated() == 1u);
109 }
110 SECTION("reset")
111 {
112 joint_ptr<joint_test, test_allocator> ptr1(alloc);
113 verify_null(ptr1, alloc);
114 ptr1.reset();
115 verify_null(ptr1, alloc);
116
117 auto ptr2 = allocate_joint<joint_test>(alloc, joint_size(10u), 5);
118 verify(ptr2, alloc, 5);
119 REQUIRE(alloc.no_allocated() == 1u);
120
121 ptr2.reset();
122 verify_null(ptr2, alloc);
123 REQUIRE(alloc.no_allocated() == 0u);
124 }
125 SECTION("compare")
126 {
127 joint_ptr<joint_test, test_allocator> ptr1(alloc);
128 verify_null(ptr1, alloc);
129
130 auto ptr2 = allocate_joint<joint_test>(alloc, joint_size(10u), 5);
131 verify(ptr2, alloc, 5);
132
133 REQUIRE(ptr1 == nullptr);
134 REQUIRE(nullptr == ptr1);
135 REQUIRE_FALSE(ptr1 != nullptr);
136 REQUIRE_FALSE(nullptr != ptr1);
137
138 REQUIRE_FALSE(ptr1 == ptr2.get());
139 REQUIRE_FALSE(ptr2.get() == ptr1);
140 REQUIRE(ptr1 != ptr2.get());
141 REQUIRE(ptr2.get() != ptr1);
142
143 REQUIRE_FALSE(ptr2 == nullptr);
144 REQUIRE_FALSE(nullptr == ptr2);
145 REQUIRE(ptr2 != nullptr);
146 REQUIRE(nullptr != ptr2);
147
148 REQUIRE(ptr2 == ptr2.get());
149 REQUIRE(ptr2.get() == ptr2);
150 REQUIRE_FALSE(ptr2 != ptr2.get());
151 REQUIRE_FALSE(ptr2.get() != ptr2);
152 }
153 SECTION("clone")
154 {
155 auto ptr1 = allocate_joint<joint_test>(alloc, joint_size(10u), 5);
156 verify(ptr1, alloc, 5);
157
158 REQUIRE(alloc.no_allocated() == 1u);
159 REQUIRE(alloc.last_allocated().size == sizeof(joint_test) + 10u);
160 REQUIRE(alloc.last_allocated().alignment == alignof(joint_test));
161 REQUIRE(alloc.no_deallocated() == 0u);
162
163 auto ptr2 = clone_joint(alloc, *ptr1);
164 verify(ptr2, alloc, 5);
165
166 REQUIRE(alloc.no_allocated() == 2u);
167 REQUIRE(alloc.last_allocated().size == sizeof(joint_test));
168 REQUIRE(alloc.last_allocated().alignment == alignof(joint_test));
169 REQUIRE(alloc.no_deallocated() == 0u);
170 }
171
172 REQUIRE(alloc.no_allocated() == 0u);
173 }
174
175 TEST_CASE("joint_allocator", "[allocator]")
176 {
177 struct joint_test : joint_type<joint_test>
178 {
179 vector<int, joint_allocator> vec;
180 int value;
181
182 joint_test(joint tag, int val, std::size_t size) : joint_type(tag), vec(*this), value(val)
183 {
184 vec.reserve(size);
185 vec.push_back(42);
186 vec.push_back(-1);
187 }
188 };
189
190 test_allocator alloc;
191
192 auto ptr = allocate_joint<joint_test>(alloc, joint_size(10 * sizeof(int)), 5, 3);
193 verify(ptr, alloc, 5);
194 REQUIRE(ptr->vec[0] == 42);
195 REQUIRE(ptr->vec[1] == -1);
196
197 ptr->vec.push_back(5);
198 REQUIRE(ptr->vec.back() == 5);
199
200 REQUIRE(alloc.no_allocated() == 1u);
201 REQUIRE(alloc.last_allocated().size == sizeof(joint_test) + 10 * sizeof(int));
202 REQUIRE(alloc.last_allocated().alignment == alignof(joint_test));
203 }
204
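// Checks that a joint_array's data(), size(), iterators and operator[] are all consistent with each other.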
205 template <typename T>
206 void verify(const joint_array<T>& array, std::size_t size)
207 {
208 REQUIRE(array.data() == array.begin());
209 REQUIRE(array.size() == size);
210 REQUIRE(!array.empty());
211
212 auto iter = array.begin();
213 for (auto i = 0u; i != array.size(); ++i)
214 {
215 REQUIRE(&array[i] == array.data() + i);
216 REQUIRE(&array[i] == iter);
217 REQUIRE(iter - array.begin() == i);
218 ++iter;
219 }
220 }
221
222 TEST_CASE("joint_array", "[allocator]")
223 {
224 struct joint_test : joint_type<joint_test>
225 {
226 int value;
227
228 joint_test(joint tag, int v) : joint_type(tag), value(v) {}
229 };
230
231 test_allocator alloc;
232 auto ptr = allocate_joint<joint_test>(alloc, joint_size(20 * sizeof(int)), 5);
233 verify(ptr, alloc, 5);
234
235 SECTION("size constructor")
236 {
237 joint_array<int> arr(5, *ptr);
238 verify(arr, 5);
239 for (auto el : arr)
240 REQUIRE(el == 0);
241 }
242 SECTION("size value constructor")
243 {
244 joint_array<int> arr(5, 1, *ptr);
245 verify(arr, 5);
246 for (auto el : arr)
247 REQUIRE(el == 1);
248 }
249 SECTION("ilist constructor")
250 {
251 joint_array<int> arr({1, 2, 3}, *ptr);
252 verify(arr, 3);
253 REQUIRE(arr[0] == 1);
254 REQUIRE(arr[1] == 2);
255 REQUIRE(arr[2] == 3);
256 }
257 SECTION("iterator constructor")
258 {
259 int input[] = {1, 2, 3};
260 joint_array<int> arr(std::begin(input), std::end(input), *ptr);
261 verify(arr, 3);
262 REQUIRE(arr[0] == 1);
263 REQUIRE(arr[1] == 2);
264 REQUIRE(arr[2] == 3);
265 }
266 SECTION("copy/move constructor")
267 {
268 joint_array<int> arr1({1, 2, 3}, *ptr);
269 verify(arr1, 3);
270
271 joint_array<int> arr2(arr1, *ptr);
272 verify(arr2, 3);
273 REQUIRE(arr2[0] == 1);
274 REQUIRE(arr2[1] == 2);
275 REQUIRE(arr2[2] == 3);
276 }
277 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "memory_arena.hpp"
5
6 #include <catch.hpp>
7
8 #include "static_allocator.hpp"
9
10 using namespace foonathan::memory;
11 using namespace detail;
12
13 TEST_CASE("detail::memory_block_stack", "[detail][arena]")
14 {
15 memory_block_stack stack;
16 REQUIRE(stack.empty());
17
18 static_allocator_storage<1024> memory;
19
20 stack.push({&memory, 1024});
21 REQUIRE(!stack.empty());
22
23 auto top = stack.top();
24 REQUIRE(top.memory >= static_cast<void*>(&memory));
25 REQUIRE(top.size <= 1024);
26 REQUIRE(is_aligned(top.memory, max_alignment));
27
28 SECTION("pop")
29 {
30 auto block = stack.pop();
31 REQUIRE(block.size == 1024);
32 REQUIRE(block.memory == static_cast<void*>(&memory));
33 }
34 SECTION("steal_top")
35 {
36 memory_block_stack other;
37
38 other.steal_top(stack);
39 REQUIRE(stack.empty());
40 REQUIRE(!other.empty());
41
42 auto other_top = other.top();
43 REQUIRE(other_top.memory >= static_cast<void*>(&memory));
44 REQUIRE(other_top.size <= 1024);
45 REQUIRE(is_aligned(other_top.memory, max_alignment));
46 }
47
48 static_allocator_storage<1024> a, b, c;
49 stack.push({&a, 1024});
50 stack.push({&b, 1024});
51 stack.push({&c, 1024});
52
53 SECTION("multiple pop")
54 {
55 auto block = stack.pop();
56 REQUIRE(block.memory == static_cast<void*>(&c));
57 block = stack.pop();
58 REQUIRE(block.memory == static_cast<void*>(&b));
59 block = stack.pop();
60 REQUIRE(block.memory == static_cast<void*>(&a));
61 block = stack.pop();
62 REQUIRE(block.memory == static_cast<void*>(&memory));
63 }
64 SECTION("multiple steal_from")
65 {
66 memory_block_stack other;
67
68 other.steal_top(stack);
69 other.steal_top(stack);
70 other.steal_top(stack);
71 other.steal_top(stack);
72
73 REQUIRE(stack.empty());
74
75 auto block = other.pop();
76 REQUIRE(block.memory == static_cast<void*>(&memory));
77 block = other.pop();
78 REQUIRE(block.memory == static_cast<void*>(&a));
79 block = other.pop();
80 REQUIRE(block.memory == static_cast<void*>(&b));
81 block = other.pop();
82 REQUIRE(block.memory == static_cast<void*>(&c));
83 }
84 SECTION("move")
85 {
86 memory_block_stack other = detail::move(stack);
87 REQUIRE(stack.empty());
88 REQUIRE(!other.empty());
89
90 auto block = other.pop();
91 REQUIRE(block.memory == static_cast<void*>(&c));
92
93 stack = detail::move(other);
94 REQUIRE(other.empty());
95 REQUIRE(!stack.empty());
96
97 block = stack.pop();
98 REQUIRE(block.memory == static_cast<void*>(&b));
99 }
100 }
101
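// BlockAllocator handing out blocks from a fixed internal array; the destructor verifies every block was returned,
// and deallocation must happen in LIFO order.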
102 template <std::size_t N>
103 struct test_block_allocator
104 {
105 static_allocator_storage<1024> blocks[N];
106 std::size_t i = 0;
107
108 test_block_allocator(std::size_t) {}
109
110 ~test_block_allocator()
111 {
112 REQUIRE(i == 0u);
113 }
114
115 memory_block allocate_block()
116 {
117 REQUIRE(i < N);
118 return {&blocks[i++], 1024};
119 }
120
121 void deallocate_block(memory_block b)
122 {
123 REQUIRE(static_cast<void*>(&blocks[i - 1]) == b.memory);
124 --i;
125 }
126
127 std::size_t next_block_size() const
128 {
129 return 1024;
130 }
131 };
132
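// With caching enabled, deallocated blocks stay in the arena (capacity is kept) and are reused by the next
// allocate_block(); only shrink_to_fit() returns them to the block allocator.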
133 TEST_CASE("memory_arena w/ caching", "[arena]")
134 {
135 memory_arena<test_block_allocator<10>> arena(1024);
136 REQUIRE(arena.get_allocator().i == 0u);
137 REQUIRE(arena.size() == 0u);
138 REQUIRE(arena.capacity() == 0u);
139
140 arena.allocate_block();
141 REQUIRE(arena.get_allocator().i == 1u);
142 REQUIRE(arena.size() == 1u);
143 REQUIRE(arena.capacity() == 1u);
144
145 arena.allocate_block();
146 REQUIRE(arena.get_allocator().i == 2u);
147 REQUIRE(arena.size() == 2u);
148 REQUIRE(arena.capacity() == 2u);
149
150 arena.deallocate_block();
151 REQUIRE(arena.get_allocator().i == 2u);
152 REQUIRE(arena.size() == 1u);
153 REQUIRE(arena.capacity() == 2u);
154
155 arena.allocate_block();
156 REQUIRE(arena.get_allocator().i == 2u);
157 REQUIRE(arena.size() == 2u);
158 REQUIRE(arena.capacity() == 2u);
159
160 arena.deallocate_block();
161 arena.deallocate_block();
162 REQUIRE(arena.get_allocator().i == 2u);
163 REQUIRE(arena.size() == 0u);
164 REQUIRE(arena.capacity() == 2u);
165
166 arena.shrink_to_fit();
167 REQUIRE(arena.get_allocator().i == 0u);
168 REQUIRE(arena.size() == 0u);
169 REQUIRE(arena.capacity() == 0u);
170
171 arena.allocate_block();
172 REQUIRE(arena.get_allocator().i == 1u);
173 REQUIRE(arena.size() == 1u);
174 REQUIRE(arena.capacity() == 1u);
175 }
176
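// Without caching, deallocate_block() returns the block to the block allocator immediately.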
177 TEST_CASE("memory_arena w/o caching", "[arena]")
178 {
179 memory_arena<test_block_allocator<10>, false> arena(1024);
180 REQUIRE(arena.get_allocator().i == 0u);
181 REQUIRE(arena.size() == 0u);
182 REQUIRE(arena.capacity() == 0u);
183
184 arena.allocate_block();
185 REQUIRE(arena.get_allocator().i == 1u);
186 REQUIRE(arena.size() == 1u);
187 REQUIRE(arena.capacity() == 1u);
188
189 arena.allocate_block();
190 REQUIRE(arena.get_allocator().i == 2u);
191 REQUIRE(arena.size() == 2u);
192 REQUIRE(arena.capacity() == 2u);
193
194 arena.deallocate_block();
195 REQUIRE(arena.get_allocator().i == 1u);
196 REQUIRE(arena.size() == 1u);
197 REQUIRE(arena.capacity() == 1u);
198
199 arena.allocate_block();
200 REQUIRE(arena.get_allocator().i == 2u);
201 REQUIRE(arena.size() == 2u);
202 REQUIRE(arena.capacity() == 2u);
203
204 arena.deallocate_block();
205 arena.deallocate_block();
206 REQUIRE(arena.get_allocator().i == 0u);
207 REQUIRE(arena.size() == 0u);
208 REQUIRE(arena.capacity() == 0u);
209
210 arena.allocate_block();
211 REQUIRE(arena.get_allocator().i == 1u);
212 REQUIRE(arena.size() == 1u);
213 REQUIRE(arena.capacity() == 1u);
214 }
215
216 static_assert(
217 std::is_same<growing_block_allocator<>,
218 foonathan::memory::make_block_allocator_t<growing_block_allocator<>>>::value,
219 "");
220 static_assert(std::is_same<growing_block_allocator<>,
221 foonathan::memory::make_block_allocator_t<default_allocator>>::value,
222 "");
223
224 template <class RawAlloc>
225 using block_wrapper = growing_block_allocator<RawAlloc>;
226
227 TEST_CASE("make_block_allocator", "[arena]")
228 {
229 growing_block_allocator<heap_allocator> a1 = make_block_allocator<heap_allocator>(1024);
230 REQUIRE(a1.next_block_size() == 1024);
231
232 growing_block_allocator<heap_allocator> a2 =
233 make_block_allocator<block_wrapper, heap_allocator>(1024);
234 REQUIRE(a2.next_block_size() == 1024);
235 }
236
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "memory_pool.hpp"
5
6 #include <algorithm>
7 #include <catch.hpp>
8 #include <random>
9 #include <vector>
10
11 #include "allocator_storage.hpp"
12 #include "test_allocator.hpp"
13
14 using namespace foonathan::memory;
15
16 // don't test the actual node allocation, only the connection between the arena and the implementation,
17 // so testing memory_pool<node_pool> suffices
18 TEST_CASE("memory_pool", "[pool]")
19 {
20 using pool_type = memory_pool<node_pool, allocator_reference<test_allocator>>;
21 test_allocator alloc;
22 {
23 pool_type pool(4, pool_type::min_block_size(4, 25), alloc);
24 REQUIRE(pool.node_size() >= 4u);
25 REQUIRE(pool.capacity_left() >= 25 * 4u);
26 REQUIRE(pool.next_capacity() >= 25 * 4u);
27 REQUIRE(alloc.no_allocated() == 1u);
28
29 SECTION("normal alloc/dealloc")
30 {
31 std::vector<void*> ptrs;
32 auto capacity = pool.capacity_left();
33 REQUIRE(capacity / 4 >= 25);
34 for (std::size_t i = 0u; i != 25; ++i)
35 ptrs.push_back(pool.allocate_node());
36 REQUIRE(pool.capacity_left() == 0u);
37 REQUIRE(alloc.no_allocated() == 1u);
38
39 std::shuffle(ptrs.begin(), ptrs.end(), std::mt19937{});
40
41 for (auto ptr : ptrs)
42 pool.deallocate_node(ptr);
43 REQUIRE(pool.capacity_left() == capacity);
44 }
45 SECTION("multiple block alloc/dealloc")
46 {
47 std::vector<void*> ptrs;
48 auto capacity = pool.capacity_left();
49 for (std::size_t i = 0u; i != capacity / pool.node_size(); ++i)
50 ptrs.push_back(pool.allocate_node());
51 REQUIRE(pool.capacity_left() == 0u);
52
53 ptrs.push_back(pool.allocate_node());
54 REQUIRE(pool.capacity_left() >= capacity - pool.node_size());
55 REQUIRE(alloc.no_allocated() == 2u);
56
57 std::shuffle(ptrs.begin(), ptrs.end(), std::mt19937{});
58
59 for (auto ptr : ptrs)
60 pool.deallocate_node(ptr);
61 REQUIRE(pool.capacity_left() >= capacity);
62 REQUIRE(alloc.no_allocated() == 2u);
63 }
64 }
65 REQUIRE(alloc.no_allocated() == 0u);
66 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "memory_pool_collection.hpp"
5
6 #include <algorithm>
7 #include <catch.hpp>
8 #include <random>
9 #include <vector>
10
11 #include "allocator_storage.hpp"
12 #include "test_allocator.hpp"
13
14 using namespace foonathan::memory;
15 using namespace detail;
16
17 TEST_CASE("memory_pool_collection", "[pool]")
18 {
19 using pools =
20 memory_pool_collection<node_pool, identity_buckets, allocator_reference<test_allocator>>;
21 test_allocator alloc;
22 {
23 const auto max_size = 16u;
24 pools pool(max_size, 2000, alloc);
25 REQUIRE(pool.max_node_size() == max_size);
26 REQUIRE(pool.capacity_left() <= 2000u);
27 REQUIRE(pool.next_capacity() >= 2000u);
28 REQUIRE(alloc.no_allocated() == 1u);
29
30 for (auto i = 0u; i != max_size; ++i)
31 REQUIRE(pool.pool_capacity_left(i) == 0u);
32
33 SECTION("normal alloc/dealloc")
34 {
35 std::vector<void*> a, b;
36 for (auto i = 0u; i != 5u; ++i)
37 {
38 a.push_back(pool.allocate_node(1));
39 b.push_back(pool.try_allocate_node(5));
40 REQUIRE(b.back());
41 }
42 REQUIRE(alloc.no_allocated() == 1u);
43 REQUIRE(pool.capacity_left() <= 2000u);
44
45 std::shuffle(a.begin(), a.end(), std::mt19937{});
46 std::shuffle(b.begin(), b.end(), std::mt19937{});
47
48 for (auto ptr : a)
49 REQUIRE(pool.try_deallocate_node(ptr, 1));
50 for (auto ptr : b)
51 pool.deallocate_node(ptr, 5);
52 }
53 SECTION("multiple block alloc/dealloc")
54 {
55 std::vector<void*> a, b;
56 for (auto i = 0u; i != 1000u; ++i)
57 {
58 a.push_back(pool.allocate_node(1));
59 b.push_back(pool.allocate_node(5));
60 }
61 REQUIRE(alloc.no_allocated() > 1u);
62
63 std::shuffle(a.begin(), a.end(), std::mt19937{});
64 std::shuffle(b.begin(), b.end(), std::mt19937{});
65
66 for (auto ptr : a)
67 pool.deallocate_node(ptr, 1);
68 for (auto ptr : b)
69 pool.deallocate_node(ptr, 5);
70 }
71 }
72 REQUIRE(alloc.no_allocated() == 0u);
73 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "memory_resource_adapter.hpp"
5
6 #include <catch.hpp>
7 #include <new>
8
9 #include "allocator_storage.hpp"
10 #include "std_allocator.hpp"
11
12 using namespace foonathan::memory;
13
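// RawAllocator that tracks the number of bytes currently allocated as nodes and as arrays,
// with a small max_node_size() so the adapter is forced into array allocations.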
14 struct pmr_test_allocator
15 {
16 std::size_t node_allocated = 0, array_allocated = 0;
17
18 void* allocate_node(std::size_t size, std::size_t)
19 {
20 node_allocated += size;
21 return ::operator new(size);
22 }
23
24 void* allocate_array(std::size_t n, std::size_t size, std::size_t)
25 {
26 array_allocated += n * size;
27 return ::operator new(n * size);
28 }
29
30 void deallocate_node(void* p, std::size_t size, std::size_t)
31 {
32 node_allocated -= size;
33 ::operator delete(p);
34 }
35
36 void deallocate_array(void* p, std::size_t n, std::size_t size, std::size_t)
37 {
38 array_allocated -= n * size;
39 ::operator delete(p);
40 }
41
42 std::size_t max_node_size() const
43 {
44 return 8u;
45 }
46 };
47
48 TEST_CASE("memory_resource_adapter", "[adapter]")
49 {
50 auto max_node = pmr_test_allocator{}.max_node_size();
51
52 memory_resource_adapter<pmr_test_allocator> alloc({});
53 REQUIRE(alloc.get_allocator().node_allocated == 0u);
54 REQUIRE(alloc.get_allocator().array_allocated == 0u);
55
56 auto mem = alloc.allocate(max_node / 2);
57 REQUIRE(alloc.get_allocator().node_allocated == max_node / 2);
58 REQUIRE(alloc.get_allocator().array_allocated == 0u);
59
60 alloc.deallocate(mem, max_node / 2);
61 REQUIRE(alloc.get_allocator().node_allocated == 0);
62 REQUIRE(alloc.get_allocator().array_allocated == 0u);
63
64 mem = alloc.allocate(max_node);
65 REQUIRE(alloc.get_allocator().node_allocated == max_node);
66 REQUIRE(alloc.get_allocator().array_allocated == 0u);
67
68 alloc.deallocate(mem, max_node);
69 REQUIRE(alloc.get_allocator().node_allocated == 0);
70 REQUIRE(alloc.get_allocator().array_allocated == 0u);
71
72 mem = alloc.allocate(max_node * 2);
73 REQUIRE(alloc.get_allocator().node_allocated == 0u);
74 REQUIRE(alloc.get_allocator().array_allocated == max_node * 2);
75
76 alloc.deallocate(mem, max_node * 2);
77 REQUIRE(alloc.get_allocator().node_allocated == 0u);
78 REQUIRE(alloc.get_allocator().array_allocated == 0u);
79
80 mem = alloc.allocate(max_node * 2 + 1);
81 REQUIRE(alloc.get_allocator().node_allocated == 0u);
82 REQUIRE(alloc.get_allocator().array_allocated == max_node * 3);
83
84 alloc.deallocate(mem, max_node * 2 + 1);
85 REQUIRE(alloc.get_allocator().node_allocated == 0u);
86 REQUIRE(alloc.get_allocator().array_allocated == 0u);
87 }
88
89 // compilation checks
90 template class foonathan::memory::allocator_storage<reference_storage<memory_resource_allocator>,
91 no_mutex>;
92 template class foonathan::memory::allocator_traits<memory_resource_allocator>;
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "memory_stack.hpp"
5
6 #include <catch.hpp>
7
8 #include "allocator_storage.hpp"
9 #include "test_allocator.hpp"
10
11 using namespace foonathan::memory;
12
13 TEST_CASE("memory_stack", "[stack]")
14 {
15 test_allocator alloc;
16
17 using stack_type = memory_stack<allocator_reference<test_allocator>>;
18 stack_type stack(stack_type::min_block_size(100), alloc);
19 REQUIRE(alloc.no_allocated() == 1u);
20 REQUIRE(stack.capacity_left() == 100);
21 auto capacity = stack.capacity_left();
22
23 SECTION("empty unwind")
24 {
25 auto m = stack.top();
26 stack.unwind(m);
27 REQUIRE(capacity <= 100);
28 REQUIRE(alloc.no_allocated() == 1u);
29 REQUIRE(alloc.no_deallocated() == 0u);
30 }
31 SECTION("normal allocation/unwind")
32 {
33 stack.allocate(10, 1);
34 REQUIRE(stack.capacity_left() == capacity - 10 - 2 * detail::debug_fence_size);
35
36 auto m = stack.top();
37
38 auto memory = stack.allocate(10, 16);
39 REQUIRE(detail::is_aligned(memory, 16));
40
41 stack.unwind(m);
42 REQUIRE(stack.capacity_left() == capacity - 10 - 2 * detail::debug_fence_size);
43
44 REQUIRE(stack.allocate(10, 16) == memory);
45 REQUIRE(alloc.no_allocated() == 1u);
46 REQUIRE(alloc.no_deallocated() == 0u);
47 }
48 SECTION("multiple block allocation/unwind")
49 {
50 // note: tests are mostly hoping not to get a segfault
51
52 stack.allocate(10, 1);
53 auto m = stack.top();
54
55 auto old_next = stack.next_capacity();
56
57 stack.allocate(100, 1);
58 REQUIRE(stack.next_capacity() > old_next);
59 REQUIRE(alloc.no_allocated() == 2u);
60 REQUIRE(alloc.no_deallocated() == 0u);
61
62 auto m2 = stack.top();
63 REQUIRE(m < m2);
64 stack.allocate(10, 1);
65 stack.unwind(m2);
66 stack.allocate(20, 1);
67
68 stack.unwind(m);
69 REQUIRE(alloc.no_allocated() == 2u);
70 REQUIRE(alloc.no_deallocated() == 0u);
71
72 stack.allocate(10, 1);
73
74 stack.shrink_to_fit();
75 REQUIRE(alloc.no_allocated() == 1u);
76 REQUIRE(alloc.no_deallocated() == 1u);
77 }
78 SECTION("move")
79 {
80 auto other = detail::move(stack);
81 auto m = other.top();
82 other.allocate(10, 1);
83 REQUIRE(alloc.no_allocated() == 1u);
84
85 stack.allocate(10, 1);
86 REQUIRE(alloc.no_allocated() == 2u);
87
88 stack = detail::move(other);
89 REQUIRE(alloc.no_allocated() == 1u);
90 stack.unwind(m);
91 }
92 SECTION("marker comparision")
93 {
94 auto m1 = stack.top();
95 auto m2 = stack.top();
96 REQUIRE(m1 == m2);
97
98 stack.allocate(1, 1);
99 auto m3 = stack.top();
100 REQUIRE(m1 < m3);
101
102 stack.unwind(m2);
103 REQUIRE(stack.top() == m2);
104 }
105 SECTION("unwinder")
106 {
107 auto m = stack.top();
108 {
109 memory_stack_raii_unwind<decltype(stack)> unwind(stack);
110 stack.allocate(10, 1);
111 REQUIRE(unwind.will_unwind());
112 REQUIRE(&unwind.get_stack() == &stack);
113 REQUIRE(unwind.get_marker() == m);
114 }
115 REQUIRE(stack.top() == m);
116
117 memory_stack_raii_unwind<decltype(stack)> unwind(stack);
118 stack.allocate(10, 1);
119 unwind.unwind();
120 REQUIRE(stack.top() == m);
121 REQUIRE(unwind.will_unwind());
122
123 {
124 memory_stack_raii_unwind<decltype(stack)> unwind2(stack);
125 stack.allocate(10, 1);
126 unwind2.release();
127 REQUIRE(!unwind2.will_unwind());
128 }
129 REQUIRE(stack.top() > m);
130 m = stack.top();
131
132 unwind.release(); // need to release
133 unwind = memory_stack_raii_unwind<decltype(stack)>(stack);
134 REQUIRE(unwind.will_unwind());
135 REQUIRE(unwind.get_marker() == m);
136 REQUIRE(&unwind.get_stack() == &stack);
137 auto unwind2 = detail::move(unwind);
138 REQUIRE(unwind2.will_unwind());
139 REQUIRE(&unwind2.get_stack() == &stack);
140 REQUIRE(unwind2.get_marker() == m);
141 REQUIRE(!unwind.will_unwind());
142 }
143 SECTION("overaligned")
144 {
145 auto align = 2 * detail::max_alignment;
146 auto mem = stack.allocate(align, align);
147 REQUIRE(detail::is_aligned(mem, align));
148 }
149 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 // Profiling code to check performance of allocators.
5
6 #include <iomanip>
7 #include <iostream>
8 #include <locale>
9
10 #include "allocator_storage.hpp"
11 #include "heap_allocator.hpp"
12 #include "new_allocator.hpp"
13 #include "memory_pool.hpp"
14 #include "memory_stack.hpp"
15
16 using namespace foonathan::memory;
17
18 #include "benchmark.hpp"
19
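// Benchmarks Func once per allocator and prints the timings as the cells of one markdown table row.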
20 template <class Func, class... Allocators>
21 void benchmark_node(std::size_t count, std::size_t size, Allocators&... allocators)
22 {
23 int dummy[] = {(std::cout << benchmark(Func{count}, allocators, size) << '|', 0)...};
24 (void)dummy;
25 std::cout << '\n';
26 }
27
28 template <class Func>
29 void benchmark_node(std::initializer_list<std::size_t> counts,
30 std::initializer_list<std::size_t> node_sizes)
31 {
32 std::cout << "##" << Func::name() << "\n";
33 std::cout << '\n';
34 std::cout << "Size|Heap|New|Small|Node|Array|Stack\n";
35 std::cout << "----|-----|---|-----|----|-----|-----\n";
36 for (auto count : counts)
37 for (auto size : node_sizes)
38 {
39 auto heap_alloc = [&] { return heap_allocator{}; };
40 auto new_alloc = [&] { return new_allocator{}; };
41
42 auto small_alloc = [&] {
43 return memory_pool<small_node_pool>(size, count * size + 1024);
44 };
45 auto node_alloc = [&] {
46 return memory_pool<node_pool>(size, count * std::max(size, sizeof(char*)) + 1024);
47 };
48 auto array_alloc = [&] {
49 return memory_pool<array_pool>(size, count * std::max(size, sizeof(char*)) + 1024);
50 };
51
52 auto stack_alloc = [&] { return memory_stack<>(count * size); };
53
54 std::cout << count << "\\*" << size << "|";
55 benchmark_node<Func>(count, size, heap_alloc, new_alloc, small_alloc, node_alloc,
56 array_alloc, stack_alloc);
57 }
58 std::cout << '\n';
59 }
60
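// Dispatches the benchmark over every functor in the parameter pack.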
61 template <class Func, class Second, class... Tail>
62 void benchmark_node(std::initializer_list<std::size_t> counts,
63 std::initializer_list<std::size_t> node_sizes)
64 {
65 benchmark_node<Func>(counts, node_sizes);
66 benchmark_node<Second, Tail...>(counts, node_sizes);
67 }
68
69 template <class Func, class... Allocators>
70 void benchmark_array(std::size_t count, std::size_t array_size, std::size_t node_size,
71 Allocators&... allocators)
72 {
73 int dummy[] = {
74 (std::cout << benchmark(Func{count}, allocators, array_size, node_size) << '|', 0)...};
75 (void)dummy;
76 std::cout << '\n';
77 }
78
79 template <class Func>
80 void benchmark_array(std::initializer_list<std::size_t> counts,
81 std::initializer_list<std::size_t> node_sizes,
82 std::initializer_list<std::size_t> array_sizes)
83 {
84 using namespace foonathan::memory;
85 std::cout << "##" << Func::name() << "\n";
86 std::cout << '\n';
87 std::cout << "Size|Heap|New|Node|Array|Stack\n";
88 std::cout << "----|-----|---|----|-----|-----\n";
89 for (auto count : counts)
90 for (auto node_size : node_sizes)
91 for (auto array_size : array_sizes)
92 {
93 auto mem_needed = count * std::max(node_size, sizeof(char*)) * array_size + 1024;
94
95 auto heap_alloc = [&] { return heap_allocator{}; };
96 auto new_alloc = [&] { return new_allocator{}; };
97
98 auto node_alloc = [&] { return memory_pool<node_pool>(node_size, mem_needed); };
99 auto array_alloc = [&] { return memory_pool<array_pool>(node_size, mem_needed); };
100
101 auto stack_alloc = [&] { return memory_stack<>(count * mem_needed); };
102
103 std::cout << count << "\\*" << node_size << "\\*" << array_size << "|";
104 benchmark_array<Func>(count, array_size, node_size, heap_alloc, new_alloc,
105 node_alloc, array_alloc, stack_alloc);
106 }
107 std::cout << '\n';
108 }
109
110 template <class Func, class Second, class... Tail>
111 void benchmark_array(std::initializer_list<std::size_t> counts,
112 std::initializer_list<std::size_t> node_sizes,
113 std::initializer_list<std::size_t> array_sizes)
114 {
115 benchmark_array<Func>(counts, node_sizes, array_sizes);
116 benchmark_array<Second, Tail...>(counts, node_sizes, array_sizes);
117 }
118
119 int main(int argc, char* argv[])
120 {
121 if (argc >= 2)
122 sample_size = std::size_t(std::atoi(argv[1]));
123
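// locale facet that groups digits with commas so the timing numbers are easier to read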
124 class comma_numpunct : public std::numpunct<char>
125 {
126 protected:
127 virtual char do_thousands_sep() const
128 {
129 return ',';
130 }
131
132 virtual std::string do_grouping() const
133 {
134 return "\03";
135 }
136 };
137
138 std::cout.imbue({std::locale(), new comma_numpunct});
139
140 std::cout << "#Node\n\n";
141 benchmark_node<single, bulk, bulk_reversed, butterfly>({256, 512, 1024}, {1, 4, 8, 256});
142 std::cout << "#Array\n\n";
143 benchmark_array<single, bulk, bulk_reversed, butterfly>({256, 512}, {1, 4, 8}, {1, 4, 8});
144 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "segregator.hpp"
5
6 #include <catch.hpp>
7
8 #include "test_allocator.hpp"
9
10 using namespace foonathan::memory;
11
12 TEST_CASE("threshold_segregatable", "[adapter]")
13 {
14 using segregatable = threshold_segregatable<test_allocator>;
15 segregatable s(8u);
16
17 REQUIRE(s.use_allocate_node(1u, 1u));
18 REQUIRE(s.use_allocate_node(8u, 1u));
19 REQUIRE(s.use_allocate_node(8u, 100u));
20 REQUIRE(!s.use_allocate_node(9u, 1u));
21 REQUIRE(!s.use_allocate_node(9u, 100u));
22
23 REQUIRE(s.use_allocate_array(1u, 1u, 1u));
24 REQUIRE(s.use_allocate_array(1u, 8u, 1u));
25 REQUIRE(s.use_allocate_array(2u, 4u, 1u));
26 REQUIRE(!s.use_allocate_array(2u, 8u, 1u));
27 REQUIRE(!s.use_allocate_array(1u, 9u, 1u));
28 }
29
30 TEST_CASE("binary_segregator", "[adapter]")
31 {
32 using segregatable = threshold_segregatable<test_allocator>;
33 using segregator = binary_segregator<segregatable, test_allocator>;
34
35 segregator s(threshold(8u, test_allocator{}));
36 REQUIRE(s.get_segregatable_allocator().no_allocated() == 0u);
37 REQUIRE(s.get_fallback_allocator().no_allocated() == 0u);
38 REQUIRE(s.get_segregatable_allocator().no_deallocated() == 0u);
39 REQUIRE(s.get_fallback_allocator().no_deallocated() == 0u);
40
41 auto ptr = s.allocate_node(1u, 1u);
42 REQUIRE(s.get_segregatable_allocator().no_allocated() == 1u);
43 REQUIRE(s.get_fallback_allocator().no_allocated() == 0u);
44 s.deallocate_node(ptr, 1u, 1u);
45 REQUIRE(s.get_segregatable_allocator().no_deallocated() == 1u);
46 REQUIRE(s.get_fallback_allocator().no_deallocated() == 0u);
47
48 ptr = s.allocate_node(8u, 1u);
49 REQUIRE(s.get_segregatable_allocator().no_allocated() == 1u);
50 REQUIRE(s.get_fallback_allocator().no_allocated() == 0u);
51 s.deallocate_node(ptr, 8u, 1u);
52 REQUIRE(s.get_segregatable_allocator().no_deallocated() == 2u);
53 REQUIRE(s.get_fallback_allocator().no_deallocated() == 0u);
54
55 ptr = s.allocate_node(8u, 1u);
56 REQUIRE(s.get_segregatable_allocator().no_allocated() == 1u);
57 REQUIRE(s.get_fallback_allocator().no_allocated() == 0u);
58 s.deallocate_node(ptr, 8u, 1u);
59 REQUIRE(s.get_segregatable_allocator().no_deallocated() == 3u);
60 REQUIRE(s.get_fallback_allocator().no_deallocated() == 0u);
61
62 ptr = s.allocate_node(9u, 1u);
63 REQUIRE(s.get_segregatable_allocator().no_allocated() == 0u);
64 REQUIRE(s.get_fallback_allocator().no_allocated() == 1u);
65 s.deallocate_node(ptr, 9u, 1u);
66 REQUIRE(s.get_segregatable_allocator().no_deallocated() == 3u);
67 REQUIRE(s.get_fallback_allocator().no_deallocated() == 1u);
68
69 ptr = s.allocate_array(1u, 8u, 1u);
70 REQUIRE(s.get_segregatable_allocator().no_allocated() == 1u);
71 REQUIRE(s.get_fallback_allocator().no_allocated() == 0u);
72 s.deallocate_array(ptr, 1u, 8u, 1u);
73 REQUIRE(s.get_segregatable_allocator().no_deallocated() == 4u);
74 REQUIRE(s.get_fallback_allocator().no_deallocated() == 1u);
75
76 ptr = s.allocate_array(2u, 8u, 1u);
77 REQUIRE(s.get_segregatable_allocator().no_allocated() == 0u);
78 REQUIRE(s.get_fallback_allocator().no_allocated() == 1u);
79 s.deallocate_array(ptr, 2u, 8u, 1u);
80 REQUIRE(s.get_segregatable_allocator().no_deallocated() == 4u);
81 REQUIRE(s.get_fallback_allocator().no_deallocated() == 2u);
82 }
83
84 TEST_CASE("segregator", "[adapter]")
85 {
86 using segregatable = threshold_segregatable<test_allocator>;
87 using segregator_0 = segregator<segregatable>;
88 using segregator_1 = segregator<segregatable, test_allocator>;
89 using segregator_2 = segregator<segregatable, segregatable, test_allocator>;
90 using segregator_3 = segregator<segregatable, segregatable, segregatable, test_allocator>;
91
92 static_assert(std::is_same<segregator_0,
93 binary_segregator<segregatable, null_allocator>>::value,
94 "");
95 static_assert(std::is_same<segregator_1,
96 binary_segregator<segregatable, test_allocator>>::value,
97 "");
98 static_assert(std::is_same<segregator_2, binary_segregator<segregatable, segregator_1>>::value,
99 "");
100 static_assert(std::is_same<segregator_3, binary_segregator<segregatable, segregator_2>>::value,
101 "");
102
103 static_assert(segregator_size<segregator_0>::value == 1, "");
104 static_assert(segregator_size<segregator_1>::value == 1, "");
105 static_assert(segregator_size<segregator_2>::value == 2, "");
106 static_assert(segregator_size<segregator_3>::value == 3, "");
107
108 static_assert(std::is_same<segregatable_allocator_type<0, segregator_3>, test_allocator>::value,
109 "");
110 static_assert(std::is_same<segregatable_allocator_type<1, segregator_3>, test_allocator>::value,
111 "");
112 static_assert(std::is_same<segregatable_allocator_type<2, segregator_3>, test_allocator>::value,
113 "");
114
115 static_assert(std::is_same<fallback_allocator_type<segregator_3>, test_allocator>::value, "");
116
117 auto s = make_segregator(threshold(4, test_allocator{}), threshold(8, test_allocator{}),
118 threshold(16, test_allocator{}), test_allocator{});
119 REQUIRE(get_segregatable_allocator<0>(s).no_allocated() == 0u);
120 REQUIRE(get_segregatable_allocator<1>(s).no_allocated() == 0u);
121 REQUIRE(get_segregatable_allocator<2>(s).no_allocated() == 0u);
122 REQUIRE(get_fallback_allocator(s).no_allocated() == 0u);
123
124 auto ptr = s.allocate_node(2, 1);
125 REQUIRE(get_segregatable_allocator<0>(s).no_allocated() == 1u);
126 REQUIRE(get_segregatable_allocator<1>(s).no_allocated() == 0u);
127 REQUIRE(get_segregatable_allocator<2>(s).no_allocated() == 0u);
128 REQUIRE(get_fallback_allocator(s).no_allocated() == 0u);
129 s.deallocate_node(ptr, 2, 1);
130
131 ptr = s.allocate_node(4, 1);
132 REQUIRE(get_segregatable_allocator<0>(s).no_allocated() == 1u);
133 REQUIRE(get_segregatable_allocator<1>(s).no_allocated() == 0u);
134 REQUIRE(get_segregatable_allocator<2>(s).no_allocated() == 0u);
135 REQUIRE(get_fallback_allocator(s).no_allocated() == 0u);
136 s.deallocate_node(ptr, 4, 1);
137
138 ptr = s.allocate_node(5, 1);
139 REQUIRE(get_segregatable_allocator<0>(s).no_allocated() == 0u);
140 REQUIRE(get_segregatable_allocator<1>(s).no_allocated() == 1u);
141 REQUIRE(get_segregatable_allocator<2>(s).no_allocated() == 0u);
142 REQUIRE(get_fallback_allocator(s).no_allocated() == 0u);
143 s.deallocate_node(ptr, 5, 1);
144
145 ptr = s.allocate_node(8, 1);
146 REQUIRE(get_segregatable_allocator<0>(s).no_allocated() == 0u);
147 REQUIRE(get_segregatable_allocator<1>(s).no_allocated() == 1u);
148 REQUIRE(get_segregatable_allocator<2>(s).no_allocated() == 0u);
149 REQUIRE(get_fallback_allocator(s).no_allocated() == 0u);
150 s.deallocate_node(ptr, 8, 1);
151
152 ptr = s.allocate_node(9, 1);
153 REQUIRE(get_segregatable_allocator<0>(s).no_allocated() == 0u);
154 REQUIRE(get_segregatable_allocator<1>(s).no_allocated() == 0u);
155 REQUIRE(get_segregatable_allocator<2>(s).no_allocated() == 1u);
156 REQUIRE(get_fallback_allocator(s).no_allocated() == 0u);
157 s.deallocate_node(ptr, 9, 1);
158
159 ptr = s.allocate_node(16, 1);
160 REQUIRE(get_segregatable_allocator<0>(s).no_allocated() == 0u);
161 REQUIRE(get_segregatable_allocator<1>(s).no_allocated() == 0u);
162 REQUIRE(get_segregatable_allocator<2>(s).no_allocated() == 1u);
163 REQUIRE(get_fallback_allocator(s).no_allocated() == 0u);
164 s.deallocate_node(ptr, 16, 1);
165
166 ptr = s.allocate_node(17, 1);
167 REQUIRE(get_segregatable_allocator<0>(s).no_allocated() == 0u);
168 REQUIRE(get_segregatable_allocator<1>(s).no_allocated() == 0u);
169 REQUIRE(get_segregatable_allocator<2>(s).no_allocated() == 0u);
170 REQUIRE(get_fallback_allocator(s).no_allocated() == 1u);
171 s.deallocate_node(ptr, 17, 1);
172 }
0 // Copyright (C) 2018 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include "smart_ptr.hpp"
5
6 #include <catch.hpp>
7
8 #include "container.hpp"
9 #include "memory_pool.hpp"
10
11 using namespace foonathan::memory;
12
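// Minimal RawAllocator that records the size of the last node allocation.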
13 struct dummy_allocator
14 {
15 static std::size_t size;
16
17 void* allocate_node(std::size_t s, std::size_t)
18 {
19 size = s;
20 return ::operator new(size);
21 }
22
23 void deallocate_node(void* ptr, std::size_t, std::size_t)
24 {
25 ::operator delete(ptr);
26 }
27 };
28
29 std::size_t dummy_allocator::size = 0;
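// Note: dummy_allocator implements only the minimal RawAllocator interface
// (allocate_node/deallocate_node); foonathan::memory::allocator_traits is
// expected to fill in defaults for the remaining operations, which is why
// this bare-bones type suffices for allocate_shared below.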
30
31 TEST_CASE("allocate_shared", "[adapter]")
32 {
33 SECTION("stateless")
34 {
35 dummy_allocator::size = 0;
36 auto ptr = allocate_shared<int>(dummy_allocator{}, 42);
37 REQUIRE(*ptr == 42);
38 #if !defined(FOONATHAN_MEMORY_NO_NODE_SIZE)
39 REQUIRE((dummy_allocator::size <= allocate_shared_node_size<int, dummy_allocator>::value));
40 #endif
41 }
42 SECTION("stateful")
43 {
44 #if defined(FOONATHAN_MEMORY_NO_NODE_SIZE)
45 memory_pool<> pool(128, 1024); // hope that's enough
46 #else
47 memory_pool<> pool(allocate_shared_node_size<int, memory_pool<>>::value, 1024);
48 #endif
49 auto ptr = allocate_shared<int>(pool, 42);
50 REQUIRE(*ptr == 42);
51 }
52 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 // catch main file, generates main function
5
6 #define CATCH_CONFIG_MAIN
7 #define CATCH_CONFIG_COLOUR_NONE
8 #include "catch.hpp"
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_TEST_TEST_ALLOCATOR_HPP
5 #define FOONATHAN_MEMORY_TEST_TEST_ALLOCATOR_HPP
6
7 #include <unordered_map>
8
9 #include <foonathan/memory/heap_allocator.hpp>
10
11 struct memory_info
12 {
13 void* memory;
14 std::size_t size, alignment;
15 };
16
17 // RawAllocator with various security checks
18 class test_allocator
19 {
20 public:
21 using is_stateful = std::true_type;
22
23 void* allocate_node(std::size_t size, std::size_t alignment)
24 {
25 auto mem = foonathan::memory::heap_allocator().allocate_node(size, alignment);
26 last_allocated_ = {mem, size, alignment};
27 allocated_[mem] = last_allocated_;
28 return mem;
29 }
30
31 void deallocate_node(void* node, std::size_t size, std::size_t alignment) noexcept
32 {
33 ++dealloc_count_;
34 auto iter = allocated_.find(node);
35 if (iter == allocated_.end() || iter->second.size != size
36 || iter->second.alignment != alignment)
37 {
38 last_valid_ = false;
39 return;
40 }
41 else
42 allocated_.erase(iter);
43 foonathan::memory::heap_allocator().deallocate_node(node, size, alignment);
44 }
45
46 std::size_t max_node_size() const noexcept
47 {
48 return std::size_t(-1);
49 }
50
51 bool last_deallocation_valid() const noexcept
52 {
53 return last_valid_;
54 }
55
56 void reset_last_deallocation_valid() noexcept
57 {
58 last_valid_ = true;
59 }
60
61 memory_info last_allocated() const noexcept
62 {
63 return last_allocated_;
64 }
65
66 std::size_t no_allocated() const noexcept
67 {
68 return allocated_.size();
69 }
70
71 std::size_t no_deallocated() const noexcept
72 {
73 return dealloc_count_;
74 }
75
76 void reset_deallocation_count() noexcept
77 {
78 dealloc_count_ = 0u;
79 }
80
81 private:
82 std::unordered_map<void*, memory_info> allocated_;
83 memory_info last_allocated_;
84 std::size_t dealloc_count_ = 0u;
85 bool last_valid_ = true;
86 };
87
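// Illustrative usage sketch of the bookkeeping interface (not part of the
// test suite):
//
//   test_allocator alloc;
//   auto ptr = alloc.allocate_node(16, 8);
//   // alloc.no_allocated() == 1, alloc.last_allocated().size == 16
//   alloc.deallocate_node(ptr, 16, 8); // size/alignment match the allocation
//   // alloc.no_allocated() == 0, alloc.no_deallocated() == 1,
//   // alloc.last_deallocation_valid() == true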
88 #endif //FOONATHAN_MEMORY_TEST_TEST_ALLOCATOR_HPP
0 # Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 # This file is subject to the license terms in the LICENSE file
2 # found in the top-level directory of this distribution.
3
4 # builds tools
5
6 add_executable(foonathan_memory_node_size_debugger test_types.hpp node_size_debugger.hpp node_size_debugger.cpp)
7 if (CMAKE_CROSSCOMPILING)
8 # statically link when cross compiling so emulator doesn't need library paths
9 if (MSVC)
10 set_target_properties(foonathan_memory_node_size_debugger PROPERTIES LINK_FLAGS "/WHOLEARCHIVE")
11 else()
12 set_target_properties(foonathan_memory_node_size_debugger PROPERTIES LINK_FLAGS "-static")
13 endif()
14 endif()
15 if (MSVC)
16 target_compile_options(foonathan_memory_node_size_debugger PRIVATE "/bigobj")
17 endif()
18 target_compile_definitions(foonathan_memory_node_size_debugger PUBLIC
19 VERSION="${FOONATHAN_MEMORY_VERSION_MAJOR}.${FOONATHAN_MEMORY_VERSION_MINOR}")
20 set_target_properties(foonathan_memory_node_size_debugger PROPERTIES OUTPUT_NAME nodesize_dbg)
21 if(NOT MSVC)
22 target_compile_features(foonathan_memory_node_size_debugger PUBLIC cxx_constexpr)
23 endif()
24
25 install(TARGETS foonathan_memory_node_size_debugger EXPORT foonathan_memoryTargets
26 RUNTIME DESTINATION ${FOONATHAN_MEMORY_RUNTIME_INSTALL_DIR})
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #include <cctype>
5 #include <cstring>
6 #include <fstream>
7 #include <iomanip>
8 #include <iostream>
9 #include <string>
10
11 #include "node_size_debugger.hpp"
12
13 const char* const exe_name = "nodesize_dbg";
14 const std::string exe_spaces(std::strlen(exe_name), ' ');
15
16 struct simple_serializer
17 {
18 std::ostream& out;
19
20 void prefix() const {}
21
22 void operator()(const debug_result& result) const
23 {
24 out << result.container_name << ":\n";
25 for (auto pair : result.node_sizes)
26 out << '\t' << pair.first << '=' << pair.second << '\n';
27 }
28
29 void suffix() const {}
30 };
31
32 struct verbose_serializer
33 {
34 std::ostream& out;
35
36 void prefix() const {}
37
38 void operator()(const debug_result& result) const
39 {
40 out << "For container '" << result.container_name << "':\n";
41 for (auto pair : result.node_sizes)
42 out << '\t' << "With an alignment of " << std::setw(2) << pair.first
43 << ", the base node size is " << std::setw(2) << pair.second << ".\n";
44 }
45
46 void suffix() const {}
47 };
48
49 struct code_serializer
50 {
51 std::ostream& out;
52 std::string alignment;
53 std::size_t tab_width;
54
55 void prefix() const
56 {
57 out << "// The following section was autogenerated by " << exe_name << '\n';
58 out << "//=== BEGIN AUTOGENERATED SECTION ===//\n\n";
59 }
60
61 void operator()(const debug_result& result) const
62 {
63 /* namespace detail
64 * {
65 * template <std::size_t Alignment>
66 * struct <name>_node_size;
67 *
68 * template <>
69 * struct <name>_node_size<I>
70 * : std::integral_constant<std::size_t, I_base_size>
71 * {};
72 *
73 * ...
74 * } // namespace detail
75 *
76 * template <typename T>
77 * struct <name>_node_size
78 * : std::integral_constant<std::size_t,
79 * detail::<name>_node_size<alignof(T)>::value + sizeof(T)>
80 * {};
81 */
82 auto newline = "\n";
83 out << "namespace detail" << newline << '{' << newline << tab()
84 << "template <std::size_t Alignment>" << newline << tab() << "struct "
85 << struct_name(result.container_name) << ';' << newline;
86 for (auto pair : result.node_sizes)
87 out << newline << tab() << "template <>" << newline << tab() << "struct "
88 << struct_name(result.container_name) << '<' << pair.first << '>' << newline
89 << tab() << ": std::integral_constant<std::size_t, " << pair.second << '>'
90 << newline << tab() << "{};" << newline;
91 out << "} // namespace detail" << newline << newline << "template <typename T>" << newline
92 << "struct " << struct_name(result.container_name) << newline
93 << ": std::integral_constant<std::size_t," << newline
94 << " detail::" << struct_name(result.container_name) << '<' << alignment
95 << ">::value + sizeof(T)>" << newline << "{};" << newline << newline;
96 }
97
98 void suffix() const
99 {
100 out << "//=== END AUTOGENERATED SECTION ===//\n";
101 }
102
103 std::string tab() const
104 {
105 if (tab_width == 0u)
106 return "\t";
107 return std::string(tab_width, ' ');
108 }
109
110 std::string struct_name(const char* container_name) const
111 {
112 return container_name + std::string("_node_size");
113 }
114 };
115
116 using debuggers =
117 std::tuple<debug_forward_list, debug_list, debug_set, debug_multiset, debug_unordered_set,
118 debug_unordered_multiset, debug_map, debug_multimap, debug_unordered_map,
119 debug_unordered_multimap, debug_shared_ptr_stateless, debug_shared_ptr_stateful>;
120
121 template <class Debugger, class Serializer>
122 void serialize_single(const Serializer& serializer)
123 {
124 serializer.prefix();
125 serializer(debug(Debugger{}));
126 serializer.suffix();
127 }
128
129 template <class Debugger, class Serializer>
130 int serialize_impl(const Serializer& serializer)
131 {
132 serializer(debug(Debugger()));
133 return 0;
134 }
135
136 template <class Serializer, class... Debuggers>
137 void serialize_impl(const Serializer& serializer, std::tuple<Debuggers...>)
138 {
139 int dummy[] = {serialize_impl<Debuggers>(serializer)...};
140 (void)dummy;
141 }
142
143 template <class Serializer>
144 void serialize(const Serializer& serializer)
145 {
146 serializer.prefix();
147 serialize_impl(serializer, debuggers{});
148 serializer.suffix();
149 }
150
151 void print_help(std::ostream& out)
152 {
153 out << "Usage: " << exe_name << " [--version][--help]\n";
154 out << " " << exe_spaces << " [--simple][--verbose]\n";
155 out << " " << exe_spaces
156 << " [--code [--alignof expr] [--append] [--tab digit] [outputfile]]\n";
157 out << "Obtains information about the internal node sizes of the STL containers.\n";
158 out << '\n';
159 out << " --simple\tprints node sizes in the form 'alignment=base-node-size'\n";
160 out << " --verbose\tprints node sizes in a more verbose form\n";
161 out << " --code\tgenerates C++ code to obtain the node size\n";
162 out << " --help\tdisplay this help and exit\n";
163 out << " --version\toutput version information and exit\n";
164 out << '\n';
165 out << "Options for code generation: \n";
166 out << " --alignof\tfollowed by an expression that calculates the alignment of a type named "
167 "'T', default is 'alignof(T)'\n";
168 out << " --append\tappend to the outputfile instead of overwriting it (overwriting is the default)\n";
169 out << " --tab\tfollowed by single digit specifying tab width, 0 uses '\\t'\n";
170 out << '\n';
171 out << "The base node size is the size of the node without the storage for the value type.\n"
172 << "Add 'sizeof(value_type)' to the base node size for the appropriate alignment to get "
173 "the whole node size.\n";
174 out << "With no options prints base node sizes of all containers in a simple manner.\n";
175 }
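// Illustrative invocations matching the option parsing in main() below
// ("generated.hpp" is a placeholder file name):
//   nodesize_dbg                                prints all base node sizes, simple form
//   nodesize_dbg --verbose                      same information, verbose form
//   nodesize_dbg --code --tab 2 generated.hpp   generates C++ code into the file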
176
177 void print_version(std::ostream& out)
178 {
179 out << exe_name << " version " << VERSION << '\n';
180 }
181
182 int print_invalid_option(std::ostream& out, const char* option)
183 {
184 out << exe_name << ": invalid option -- '";
185 while (*option == '-')
186 ++option;
187 out << option << "'\n";
188 out << "Try '" << exe_name << " --help' for more information.\n";
189 return 2;
190 }
191
192 int print_invalid_argument(std::ostream& out, const char* option)
193 {
194 out << exe_name << ": invalid argument for option -- '" << option << "'\n";
195 out << "Try '" << exe_name << " --help' for more information.\n";
196 return 2;
197 }
198
199 int main(int argc, char* argv[])
200 {
201 if (argc <= 1 || argv[1] == std::string("--simple"))
202 serialize(simple_serializer{std::cout});
203 else if (argv[1] == std::string("--verbose"))
204 serialize(verbose_serializer{std::cout});
205 else if (argv[1] == std::string("--code"))
206 {
207 std::size_t tab_width = 4u;
208 std::string alignment = "alignof(T)";
209 auto append = false;
210 std::ofstream file;
211 std::ostream out(std::cout.rdbuf());
212
213 for (auto cur = &argv[2]; *cur; ++cur)
214 {
215 if (*cur == std::string("--tab"))
216 {
217 ++cur;
218 if (*cur && std::isdigit(cur[0][0]) && !cur[0][1])
219 tab_width = std::size_t(cur[0][0] - '0');
220 else
221 return print_invalid_argument(std::cerr, "--tab");
222 }
223 else if (*cur == std::string("--alignof"))
224 {
225 ++cur;
226 if (*cur)
227 alignment = *cur;
228 else
229 return print_invalid_argument(std::cerr, "--alignof");
230 }
231 else if (!file.is_open() && *cur == std::string("--append"))
232 {
233 append = true;
234 }
235 else if (!file.is_open())
236 {
237 file.open(*cur, append ? std::ios_base::app : std::ios_base::out);
238 if (!file.is_open())
239 return print_invalid_argument(std::cerr, "outputfile");
240 out.rdbuf(file.rdbuf());
241 }
242 else
243 return print_invalid_argument(std::cerr, "--code");
244 }
245
246 code_serializer serializer{out, alignment, tab_width};
247 serialize(serializer);
248 }
249 else if (argv[1] == std::string("--help"))
250 print_help(std::cout);
251 else if (argv[1] == std::string("--version"))
252 print_version(std::cout);
253 else
254 return print_invalid_option(std::cerr, argv[1]);
255 }
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_TOOL_NODE_SIZE_DEBUGGER_HPP
5 #define FOONATHAN_MEMORY_TOOL_NODE_SIZE_DEBUGGER_HPP
6
7 #include <algorithm>
8 #include <memory>
9 #include <tuple>
10 #include <type_traits>
11
12 #include <forward_list>
13 #include <list>
14 #include <map>
15 #include <set>
16 #include <unordered_map>
17 #include <unordered_set>
18
19 template <typename TestType, class Debugger>
20 struct node_size_storage
21 {
22 static std::size_t size;
23 };
24
25 template <typename TT, class Debugger>
26 std::size_t node_size_storage<TT, Debugger>::size = 0;
27
28 struct empty_payload
29 {
30 };
31
32 // Obtains the node size for a container.
33 // Since the node type is private to the implementation,
34 // it cannot be accessed directly.
35 // It is only available to the allocator through rebinding.
36 // The allocator simply stores the size of the biggest type it is rebound to,
37 // as long as that type is not TestType, the container's actual value_type.
38 template <typename T, typename TestType, class Debugger, class AdditionalPayload = empty_payload>
39 class node_size_debugger : public std::allocator<T>, private AdditionalPayload
40 {
41 public:
42 template <typename Other>
43 struct rebind
44 {
45 using other = node_size_debugger<Other, TestType, Debugger, AdditionalPayload>;
46 };
47
48 node_size_debugger()
49 {
50 if (!std::is_same<T, TestType>::value)
51 node_size() = std::max(node_size(), sizeof(T));
52 }
53
54 template <typename U>
55 node_size_debugger(node_size_debugger<U, TestType, Debugger, AdditionalPayload>)
56 {
57 if (!std::is_same<T, TestType>::value)
58 node_size() = std::max(node_size(), sizeof(T));
59 }
60
61 static std::size_t& node_size()
62 {
63 return node_size_storage<TestType, Debugger>::size;
64 }
65
66 private:
67 template <typename U, typename TT, class Dbg, class Payload>
68 friend class node_size_debugger;
69 };
70
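// Sketch of how the trick plays out for std::list<int> (the node type name is
// implementation-specific; "_List_node<int>" is just an example):
//
//   std::list<int, node_size_debugger<int, int, debug_list>> list;
//   // the container rebinds the allocator to its private node type;
//   // the rebound allocator's constructors record
//   // node_size() = max(node_size(), sizeof(_List_node<int>))
//   list.push_front(0);
//   std::size_t base = list.get_allocator().node_size() - sizeof(int);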
71 struct hash
72 {
73 // note: deliberately not noexcept! some implementations then cache the hash value in the node, giving the worst-case node size
74 template <typename T>
75 std::size_t operator()(const T&) const
76 {
77 // quality doesn't matter
78 return 0;
79 }
80 };
81
82 struct debug_forward_list
83 {
84 const char* name() const
85 {
86 return "forward_list";
87 }
88
89 template <typename T>
90 std::size_t debug()
91 {
92 std::forward_list<T, node_size_debugger<T, T, debug_forward_list>> list;
93 list.push_front(T());
94 list.push_front(T());
95 list.push_front(T());
96 return list.get_allocator().node_size() - sizeof(T);
97 }
98 };
99
100 struct debug_list
101 {
102 const char* name() const
103 {
104 return "list";
105 }
106
107 template <typename T>
108 std::size_t debug()
109 {
110 std::list<T, node_size_debugger<T, T, debug_list>> list;
111 list.push_front(T());
112 list.push_front(T());
113 list.push_front(T());
114 return list.get_allocator().node_size() - sizeof(T);
115 }
116 };
117
118 struct debug_set
119 {
120 const char* name() const
121 {
122 return "set";
123 }
124
125 template <typename T>
126 std::size_t debug()
127 {
128 std::set<T, std::less<T>, node_size_debugger<T, T, debug_set>> set;
129 set.insert(T());
130 set.insert(T());
131 set.insert(T());
132 return set.get_allocator().node_size() - sizeof(T);
133 }
134 };
135
136 struct debug_multiset
137 {
138 const char* name() const
139 {
140 return "multiset";
141 }
142
143 template <typename T>
144 std::size_t debug()
145 {
146 std::multiset<T, std::less<T>, node_size_debugger<T, T, debug_multiset>> set;
147 set.insert(T());
148 set.insert(T());
149 set.insert(T());
150 return set.get_allocator().node_size() - sizeof(T);
151 }
152 };
153
154 struct debug_unordered_set
155 {
156 const char* name() const
157 {
158 return "unordered_set";
159 }
160
161 template <typename T>
162 std::size_t debug()
163 {
164 std::unordered_set<T, hash, std::equal_to<T>, node_size_debugger<T, T, debug_unordered_set>>
165 set;
166 set.insert(T());
167 set.insert(T());
168 set.insert(T());
169 return set.get_allocator().node_size() - sizeof(T);
170 }
171 };
172
173 struct debug_unordered_multiset
174 {
175 const char* name() const
176 {
177 return "unordered_multiset";
178 }
179
180 template <typename T>
181 std::size_t debug()
182 {
183 std::unordered_multiset<T, hash, std::equal_to<T>,
184 node_size_debugger<T, T, debug_unordered_multiset>>
185 set;
186 set.insert(T());
187 set.insert(T());
188 set.insert(T());
189 return set.get_allocator().node_size() - sizeof(T);
190 }
191 };
192
193 struct debug_map
194 {
195 const char* name() const
196 {
197 return "map";
198 }
199
200 template <typename T>
201 std::size_t debug()
202 {
203 using type = std::pair<const T, T>;
204 std::map<T, T, std::less<T>, node_size_debugger<type, type, debug_map>> map;
205 map.insert(std::make_pair(T(), T()));
206 map.insert(std::make_pair(T(), T()));
207 map.insert(std::make_pair(T(), T()));
208 return map.get_allocator().node_size() - sizeof(typename decltype(map)::value_type);
209 }
210 };
211
212 struct debug_multimap
213 {
214 const char* name() const
215 {
216 return "multimap";
217 }
218
219 template <typename T>
220 std::size_t debug()
221 {
222 using type = std::pair<const T, T>;
223 std::multimap<T, T, std::less<T>, node_size_debugger<type, type, debug_multimap>> map;
224 map.insert(std::make_pair(T(), T()));
225 map.insert(std::make_pair(T(), T()));
226 map.insert(std::make_pair(T(), T()));
227 return map.get_allocator().node_size() - sizeof(typename decltype(map)::value_type);
228 }
229 };
230
231 struct debug_unordered_map
232 {
233 const char* name() const
234 {
235 return "unordered_map";
236 }
237
238 template <typename T>
239 std::size_t debug()
240 {
241 using type = std::pair<const T, T>;
242 std::unordered_map<T, T, hash, std::equal_to<T>,
243 node_size_debugger<type, type, debug_unordered_map>>
244 map;
245 map.insert(std::make_pair(T(), T()));
246 map.insert(std::make_pair(T(), T()));
247 map.insert(std::make_pair(T(), T()));
248 return map.get_allocator().node_size() - sizeof(typename decltype(map)::value_type);
249 }
250 };
251
252 struct debug_unordered_multimap
253 {
254 const char* name() const
255 {
256 return "unordered_multimap";
257 }
258
259 template <typename T>
260 std::size_t debug()
261 {
262 using type = std::pair<const T, T>;
263 std::unordered_multimap<T, T, hash, std::equal_to<T>,
264 node_size_debugger<type, type, debug_unordered_multimap>>
265 map;
266 map.insert(std::make_pair(T(), T()));
267 map.insert(std::make_pair(T(), T()));
268 map.insert(std::make_pair(T(), T()));
269 return map.get_allocator().node_size() - sizeof(typename decltype(map)::value_type);
270 }
271 };
272
273 struct debug_shared_ptr_stateless
274 {
275 const char* name() const
276 {
277 return "shared_ptr_stateless";
278 }
279
280 template <typename T>
281 std::size_t debug()
282 {
283 struct allocator_reference_payload
284 {
285 };
286
287 auto ptr = std::allocate_shared<T>(
288 node_size_debugger<T, T, debug_shared_ptr_stateless, allocator_reference_payload>());
289 auto ptr2 = std::allocate_shared<T>(
290 node_size_debugger<T, T, debug_shared_ptr_stateless, allocator_reference_payload>());
291 return node_size_debugger<T, T, debug_shared_ptr_stateless>::node_size();
292 }
293 };
294
295 struct debug_shared_ptr_stateful
296 {
297 const char* name() const
298 {
299 return "shared_ptr_stateful";
300 }
301
302 template <typename T>
303 std::size_t debug()
304 {
305 struct allocator_reference_payload
306 {
307 void* ptr;
308 };
309
310 auto ptr = std::allocate_shared<T>(
311 node_size_debugger<T, T, debug_shared_ptr_stateful, allocator_reference_payload>());
312 auto ptr2 = std::allocate_shared<T>(
313 node_size_debugger<T, T, debug_shared_ptr_stateful, allocator_reference_payload>());
314 return node_size_debugger<T, T, debug_shared_ptr_stateful>::node_size();
315 }
316 };
317
318 template <typename T, class Debugger>
319 std::size_t debug_single(Debugger debugger)
320 {
321 return debugger.template debug<T>();
322 }
323
324 #include "test_types.hpp"
325
326 // Maps the alignment of the test types to the base size of the node.
327 // The base size of the node is the node size obtained via the allocator
328 // but without the storage for the value type.
329 // It depends only on the alignment of the value type.
330 using node_size_map = std::map<std::size_t, std::size_t>;
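// (Hypothetical example: a result of {1 => 16, 2 => 16, 4 => 16, 8 => 16} would
// mean a 16-byte base node regardless of the value type's alignment; the real
// values are implementation-specific.)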
331
332 struct debug_result
333 {
334 const char* container_name;
335 node_size_map node_sizes;
336 };
337
338 template <class Debugger, typename... Types>
339 node_size_map debug_impl(Debugger debugger, std::tuple<Types...>)
340 {
341 node_size_map result;
342 int dummy[] = {(result[alignof(Types)] = debug_single<Types>(debugger), 0)...};
343 (void)dummy;
344 return result;
345 }
346
347 template <class Debugger>
348 debug_result debug(Debugger debugger)
349 {
350 return {debugger.name(), debug_impl(debugger, test_types{})};
351 }
352
353 #endif //FOONATHAN_MEMORY_TOOL_NODE_SIZE_DEBUGGER_HPP
0 // Copyright (C) 2015-2020 Jonathan Müller <jonathanmueller.dev@gmail.com>
1 // This file is subject to the license terms in the LICENSE file
2 // found in the top-level directory of this distribution.
3
4 #ifndef FOONATHAN_MEMORY_TOOL_TEST_TYPES_HPP_INCLUDED
5 #define FOONATHAN_MEMORY_TOOL_TEST_TYPES_HPP_INCLUDED
6
7 #include <cstddef>
8 #include <tuple>
9
10 #if !defined(_MSC_VER)
11
12 // erases duplicate alignments
13 // adapted from https://github.com/irrequietus/clause/blob/alignutil/clause/ample/storage/alignutil.hh
14 // Copyright (C) 2013 - 2016 George Makrydakis <george@irrequietus.eu>
15 namespace detail
16 {
17 template <typename T>
18 using M0 = typename T::type;
19
20 /*~
21 * @note Forward declarations for several utility templates that are used to
22 * emulate higher-order functions over a pack without using the rest
23 * of the clause::ample library, for two reasons: (1) alignof allows for
24 * special optimizations when applied over a pack of types for the
25 * "sorting by alignof" step; (2) a single-header solution was required,
26 * and depending on other parts would mean bringing in ever more components
27 * of a full-fledged metaprogramming library. This header is meant to
28 * provide utilities for aligned storage; it came up when a challenge
29 * was thrown to me during a discussion with my fellow C++ programmers
30 * Jonathan Müller and Manu Sánchez. The purpose of its inclusion in `clause
31 * is simple: it can be of use when analyzing boilerplate generation for
32 * runtime containers and memory allocators by template metaprogramming.
33 *
34 * tl;dr: a fully standalone header for getting a list of types that is
35 * unique by alignment and sorted by alignment.
36 */
37 template <typename...>
38 struct M1; // Insert by alignof (map)
39 template <typename, typename>
40 struct M2; // Remove by alignof (M1 map)
41 template <typename...>
42 struct M3; // A pack wrap instead of M1
43 template <typename, typename, typename>
44 struct M4; // 'foldl,fmap' dups to M1<>
45 template <typename, typename...>
46 struct M5; // Remove M1<>
47 template <typename, std::size_t, std::size_t, std::size_t...>
48 struct M6; // Sort by alignof
49
50 /*~
51 * @note Both `M1,`M2 are used as a mutable compile-time "map"; `M1's inheritance
52 * of a function signature of the kind:
53 *
54 * static auto C(int(*)[alignof(X)]) -> X
55 *
56 * is used as a key/value store in the first "fold", while `M2 is used for
57 * a lookup removing occurrences of duplicates in the second "fold" by
58 * substituting each with `M1<>; this is orchestrated by `M4 while cleanup
59 * is handled by `M5 (removal of those `M1<> markers).
60 */
61 template <typename X, typename... T>
62 struct M1<X, T...> : M1<T...>
63 {
64 using M1<T...>::C;
65
66 static auto C(int (*)[alignof(X)]) -> X;
67
68 static std::size_t constexpr min_val =
69 alignof(X) < M1<T...>::min_val ? alignof(X) : M1<T...>::min_val;
70
71 static std::size_t constexpr max_val =
72 alignof(X) > M1<T...>::max_val ? alignof(X) : M1<T...>::max_val;
73
74 template <template <typename...> class W>
75 using rebind = W<X, T...>;
76 };
77
78 template <>
79 struct M1<>
80 {
81 static M1<> C(...);
82 static std::size_t constexpr min_val = 1;
83 static std::size_t constexpr max_val = 1;
84
85 template <template <typename...> class W>
86 using rebind = W<>;
87 };
88
89 template <typename W, typename X>
90 struct M2 : W
91 {
92 using W::C;
93 static auto C(int (*)[alignof(X)]) -> M1<>;
94 };
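/*~
 * @note Lookup sketch, assuming alignof(int) != alignof(double):
 *
 *   using map = M1<int, double>;
 *   decltype(map::C((int (*)[alignof(int)])nullptr))       // -> int
 *   using erased = M2<map, int>;
 *   decltype(erased::C((int (*)[alignof(int)])nullptr))    // -> M1<> (removed)
 *   decltype(erased::C((int (*)[alignof(double)])nullptr)) // -> double (kept)
 */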
95
96 template <typename...>
97 struct M3
98 { /* one could use M1 instead, but it renders the code more cryptic */
99 };
100
101 /*~
102 * @note Scanning for duplicates while removing them at the same time.
103 */
104 template <typename S, typename A, template <typename...> class W, typename... X, typename... Y>
105 struct M4<S, W<A, X...>, W<Y...>>
106 : M4<M2<S, A>, W<X...>, W<Y..., decltype(S::C((int (*)[alignof(A)])(nullptr)))>>
107 {
108 };
109
110 template <typename S, template <typename...> class W, typename... Y>
111 struct M4<S, W<>, W<Y...>>
112 {
113 using type = W<Y...>;
114 };
115
116 template <typename A, typename...>
117 struct M5
118 {
119 using type = A;
120 };
121
122 /*~
123 * @note Cleaning up the leftover empty `M1<> markers after `M4.
124 */
125 template <template <typename...> class W, typename... A, typename... B, typename X>
126 struct M5<W<A...>, W<X, B...>> : M5<W<A..., X>, W<B...>>
127 {
128 };
129
130 template <template <typename...> class W, typename... A, typename... B>
131 struct M5<W<A...>, W<M1<>, B...>> : M5<W<A...>, W<B...>>
132 {
133 };
134
135 template <template <typename...> class W, typename... A>
136 struct M5<W<A...>, W<>> : M5<M1<A...>>
137 {
138 }; // ::type instantiates to M1<A...> !
139
140 /*~
141 * @note Sorting step; because of alignof(X) being a power of 2 and the way
142 * our "map" in M1/M2 works, it is extremely simple to optimize using
143 * linear expansion of a sequence of powers of two, then use intrinsic
144 * "fmap" properties of the triple-dot operator for pack expansion to
145 * yield the types remaining in the M1/M2 "map" (here, it is the S type
146 * parameter). Iterates through min/max values (parameters A, B) by
147 * creating that sequence then deploying it upon the ::C(int(*)[Z])
148 * function signature doing the lookup for M1/M2.
149 */
150 template <typename S, std::size_t A, std::size_t B, std::size_t... Z>
151 struct M6 : M6<S, A * 2, B, Z..., A>
152 {
153 };
154
155 template <typename S, std::size_t A, std::size_t... Z>
156 struct M6<S, A, A, Z...>
157 {
158 using type =
159 M1<decltype(S::C((int (*)[Z])(nullptr)))..., decltype(S::C((int (*)[A])(nullptr)))>;
160 };
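/*~
 * @note Expansion sketch: M6<S, 1, 8>::type first builds the power-of-two
 * sequence 1, 2, 4 and then hits the terminating specialization with A = 8,
 * instantiating to
 *   M1<decltype(S::C((int (*)[1])nullptr)), decltype(S::C((int (*)[2])nullptr)),
 *      decltype(S::C((int (*)[4])nullptr)), decltype(S::C((int (*)[8])nullptr))>
 * so the surviving types come out sorted by increasing alignment (alignments
 * with no surviving type resolve to the M1<> catch-all).
 */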
161
162 /*~
163 * @note Assembling everything together; `M0 is just for convenience purposes
164 * in order to avoid writing typename Type::type where applicable; while
165 * the `M4 cleans up duplicates by replacement through `M1 and `M2 lookup
166 * in combination with triple-dot expansion ("fmap"...). Notice that `M1
167 * is re-used many times as a plain linear container itself, upon which
168 * `M4 partial specializations match through ordering.
169 */
170 template <typename... X>
171 using M7 = M0<
172 M5<M3<>,
173 M0<M4<M1<decltype(M1<X...>::C((int (*)[alignof(X)])(nullptr)))...>, M3<X...>, M3<>>>>>;
174
175 /*~
176 * @note The final result is given by this template alias, which instantiates
177 * to a `M1-wrapped pack; its contents are afterwards wrapped into an
178 * end-user-defined template template parameter type via a
179 * ::template rebind instantiation. Through this, `M6 will run only
180 * through the necessary range of powers of 2 for the sorting to occur.
181 */
182 template <typename... X>
183 using unisorted_aligned_ = M0<M6<M7<X...>, M1<X...>::min_val, M1<X...>::max_val>>;
184
185 /*~
186 * @desc Given a sequence of types that may contain duplicates both in kind
187 * (the type X itself) and in alignment (the result of alignof(X)), provide
188 * the equivalent list of types, unique by and sorted on alignment. Semantics are
189 * eager.
190 * @parm W : template template parameter type wrapping a sequence of types.
191 * @parm X...: parameter pack containing the aforementioned types.
192 */
193 template <template <typename...> class W, typename... X>
194 using unisorted_aligned_wrap = typename unisorted_aligned_<X...>::template rebind<W>;
195 } // namespace detail
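// Illustrative, platform-dependent example: assuming alignof(char) == alignof(bool) == 1,
// alignof(short) == 2, alignof(float) == alignof(int) == 4 and alignof(double) == 8,
//
//   detail::unisorted_aligned_wrap<std::tuple, char, bool, short, float, int, double>
//
// would instantiate to std::tuple<char, short, float, double>: one type per
// distinct alignment, sorted by increasing alignment.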
196
197 // All fundamental types, listing only one of each set that is guaranteed to share an alignment (like int and unsigned int).
198 // The list thus covers all fundamental alignments and all possible node sizes.
199 // It does not support extended alignments!
200 // The cryptic template machinery above erases the duplicate alignments.
201 using test_types = detail::unisorted_aligned_wrap<std::tuple, char, bool, short, int, long,
202 long long, float, double, long double>;
203 #else
204 using test_types = std::tuple<char, bool, short, int, long, long long, float, double, long double>;
205 #endif
206
207 #endif // FOONATHAN_MEMORY_TOOL_TEST_TYPES_HPP_INCLUDED