Ticket #2020: MemoryPool_BucketAlloc.patch

File MemoryPool_BucketAlloc.patch, 46.9 KB (added by Jorma Rebane, 11 years ago)

Pretty much final. Fixed the basic_string wrapper and added std::string interop via ps::to_string and ps::string_ref.
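A minimal interop sketch (it uses only helpers added by this patch; the surrounding function is hypothetical): ps::to_string makes a pooled copy of a std::string, while ps::string_ref reinterprets one in place and is only safe while the original is alive.

    #include "lib/ps_stl.h"

    void interop_example(const std::string& legacy)
    {
        ps::string pooled = ps::to_string(legacy);       // copy into a pool-backed string
        const ps::string& view = ps::string_ref(legacy); // no copy; relies on identical layout
        (void)pooled; (void)view;
    }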

  • lib/allocators/memory_pool.cpp

     
/* Copyright (c) 2013 Wildfire Games
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "precompiled.h"
#include "memory_pool.h"


#if MSC_VERSION // VC++
    #define threadlocal __declspec(thread)
#else // all others (GCC, Clang, etc.) use __thread
    #define threadlocal __thread
#endif
static threadlocal global_pool_t* _tls_pools = NULL;    // storage array for thread-local pools


/**
 * Edit these values according to memory tuning data.
 * Each value represents a COUNT HINT of nodes to initially allocate per pool.
 * Buckets [0..24]      4 8 12 16 20 24
 * Buckets [25..64]     32 40 48 56 64
 * Buckets [65..128]    80 96 112 128
 * Buckets [129..256]   160 192 224 256
 * Buckets [257..1024]  384 512 640 768 896 1024
 */
enum E_TLS_POOLS_RESERVE_COUNT_HINTS
{
    POOL_4 = 4096,
    POOL_8 = 4096,
    POOL_12 = 4096,
    POOL_16 = 2048,
    POOL_20 = 1024,
    POOL_24 = 512,

    // strings that don't fit std::string's static buffer fall here first,
    // so give 32 a good reserve
    POOL_32 = 512,
    POOL_40 = 224,
    POOL_48 = 192,
    POOL_56 = 160,
    POOL_64 = 128,

    // this is a pretty allocator-intensive range:
    POOL_80 = 128,
    POOL_96 = 96,
    POOL_112 = 64,
    POOL_128 = 64,

    POOL_160 = 48,
    POOL_192 = 48,
    POOL_224 = 48,
    POOL_256 = 48,

    // these esoteric ranges need a lot more tuning:
    POOL_384 = 48,
    POOL_512 = 32,
    POOL_640 = 24,
    POOL_768 = 24,
    POOL_896 = 16,
    POOL_1024 = 16,
};



// lookup table for _tls_pools [4..1024]
// element index: ((TSIZE+3)/4)-1
static unsigned char _pool_indices[256] =
{
    // 0..24
    // 4 8 12 16 20 24
    0,  // 4 (4)
    1,  // 8 (8)
    2,  // 12 (12)
    3,  // 16 (16)
    4,  // 20 (20)
    5,  // 24 (24)

    // 25..64
    // 32 40 48 56 64
    6,      // 28 (32)
    6,      // 32 (32)
    7,      // 36 (40)
    7,      // 40 (40)
    8,      // 44 (48)
    8,      // 48 (48)
    9,      // 52 (56)
    9,      // 56 (56)
    10,     // 60 (64)
    10,     // 64 (64)

    // 65..128
    // 80 96 112 128
    11,     // 68 (80)
    11,     // 72 (80)
    11,     // 76 (80)
    11,     // 80 (80)
        12,     // 84 (96)
        12,     // 88 (96)
        12,     // 92 (96)
        12,     // 96 (96)
    13,     // 100 (112)
    13,     // 104 (112)
    13,     // 108 (112)
    13,     // 112 (112)
        14,     // 116 (128)
        14,     // 120 (128)
        14,     // 124 (128)
        14,     // 128 (128)

    // 129..256
    // 160 192 224 256
    15, 15, // (160)
    15, 15, // (160)
    15, 15, // (160)
    15, 15, // (160)
        16, 16, // (192)
        16, 16, // (192)
        16, 16, // (192)
        16, 16, // (192)
    17, 17, // (224)
    17, 17, // (224)
    17, 17, // (224)
    17, 17, // (224)
        18, 18, // (256)
        18, 18, // (256)
        18, 18, // (256)
        18, 18, // (256)

    // 257..1024
    // 384 512 640 768 896 1024
    19, 19, 19, 19, // 384
    19, 19, 19, 19, // 384
    19, 19, 19, 19, // 384
    19, 19, 19, 19, // 384
    19, 19, 19, 19, // 384
    19, 19, 19, 19, // 384
    19, 19, 19, 19, // 384
    19, 19, 19, 19, // 384

    20, 20, 20, 20, // 512
    20, 20, 20, 20, // 512
    20, 20, 20, 20, // 512
    20, 20, 20, 20, // 512
    20, 20, 20, 20, // 512
    20, 20, 20, 20, // 512
    20, 20, 20, 20, // 512
    20, 20, 20, 20, // 512

    21, 21, 21, 21, // 640
    21, 21, 21, 21, // 640
    21, 21, 21, 21, // 640
    21, 21, 21, 21, // 640
    21, 21, 21, 21, // 640
    21, 21, 21, 21, // 640
    21, 21, 21, 21, // 640
    21, 21, 21, 21, // 640

    22, 22, 22, 22, // 768
    22, 22, 22, 22, // 768
    22, 22, 22, 22, // 768
    22, 22, 22, 22, // 768
    22, 22, 22, 22, // 768
    22, 22, 22, 22, // 768
    22, 22, 22, 22, // 768
    22, 22, 22, 22, // 768

    23, 23, 23, 23, // 896
    23, 23, 23, 23, // 896
    23, 23, 23, 23, // 896
    23, 23, 23, 23, // 896
    23, 23, 23, 23, // 896
    23, 23, 23, 23, // 896
    23, 23, 23, 23, // 896
    23, 23, 23, 23, // 896

    24, 24, 24, 24, // 1024
    24, 24, 24, 24, // 1024
    24, 24, 24, 24, // 1024
    24, 24, 24, 24, // 1024
    24, 24, 24, 24, // 1024
    24, 24, 24, 24, // 1024
    24, 24, 24, 24, // 1024
    24, 24, 24, 24, // 1024
};




static void _tls_pools_init()
{
    #define DYNAMIC_POOL(SIZE) global_pool_t(SIZE, POOL_##SIZE)

    // allocate storage for all the pools (we never free this)
    _tls_pools = (global_pool_t*)malloc(sizeof(global_pool_t) * 25);
    _tls_pools[0] = DYNAMIC_POOL(4);
    _tls_pools[1] = DYNAMIC_POOL(8);
    _tls_pools[2] = DYNAMIC_POOL(12);
    _tls_pools[3] = DYNAMIC_POOL(16);
    _tls_pools[4] = DYNAMIC_POOL(20);
    _tls_pools[5] = DYNAMIC_POOL(24);
    _tls_pools[6] = DYNAMIC_POOL(32);
    _tls_pools[7] = DYNAMIC_POOL(40);
    _tls_pools[8] = DYNAMIC_POOL(48);
    _tls_pools[9] = DYNAMIC_POOL(56);
    _tls_pools[10] = DYNAMIC_POOL(64);
    _tls_pools[11] = DYNAMIC_POOL(80);
    _tls_pools[12] = DYNAMIC_POOL(96);
    _tls_pools[13] = DYNAMIC_POOL(112);
    _tls_pools[14] = DYNAMIC_POOL(128);
    _tls_pools[15] = DYNAMIC_POOL(160);
    _tls_pools[16] = DYNAMIC_POOL(192);
    _tls_pools[17] = DYNAMIC_POOL(224);
    _tls_pools[18] = DYNAMIC_POOL(256);
    _tls_pools[19] = DYNAMIC_POOL(384);
    _tls_pools[20] = DYNAMIC_POOL(512);
    _tls_pools[21] = DYNAMIC_POOL(640);
    _tls_pools[22] = DYNAMIC_POOL(768);
    _tls_pools[23] = DYNAMIC_POOL(896);
    _tls_pools[24] = DYNAMIC_POOL(1024);
}


global_pool_t* _get_tls_pool(uint32_t requestSize)
{
    if(!_tls_pools) // this thread hasn't initialized the _tls_pools array yet?
    {
        _tls_pools_init(); // initialize the pools for this thread
    }

    // calculate the index: alignto4(requestSize) / 4 - 1
    const uint32_t index = (uint32_t)_pool_indices[((requestSize + 3) >> 2) - 1];
    return &_tls_pools[index];
}
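The index math above rounds each request up to the next 4-byte step and maps it through _pool_indices, so for example a 13..16 byte request lands in the 16-byte pool. A hypothetical helper to illustrate the mapping (bucket_index is not part of the patch; expected values taken from the table):

    static unsigned bucket_index(unsigned requestSize)
    {
        return _pool_indices[((requestSize + 3) >> 2) - 1];
    }
    // bucket_index(4)    == 0  (4-byte pool)
    // bucket_index(13)   == 3  (16-byte pool)
    // bucket_index(65)   == 11 (80-byte pool)
    // bucket_index(257)  == 19 (384-byte pool)
    // bucket_index(1024) == 24 (1024-byte pool)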
  • lib/allocators/memory_pool.h

     
/* Copyright (c) 2013 Wildfire Games
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef INCLUDED_MEMORY_POOL
#define INCLUDED_MEMORY_POOL


/**
 * @author Jorma Rebane
 * @date 2013.06.27
 *
 * We're using a SIZE-based pool: pools are selected by the size of the
 * allocation, which is useful if you use memory pools a lot and want
 * to share the pools across many different types.
 *
 * The STL-compatible bucket_allocator<T> is an interface on top of a
 * collection of thread-local memory pools, indexed by the request size in bytes.
 * The maximum request size for bucket_allocator<T> is 1024 bytes; for anything
 * larger, the standard malloc is used instead.
 */

#include <lib/debug.h> // ENSURE
#include <stdlib.h> // malloc/free
#include <string.h> // memmove
#include <stdint.h> // uint32_t
#include <memory>   // std::allocator



#define ENABLE_POOL_DEBUGGING_DEBUG 1 // enable pool debugging in Debug mode?
#define ENABLE_POOL_DEBUGGING_RELEASE 0 // enable pool debugging in Release mode?


// For auto-calculating the initial capacity of pools. Very crude.
// This is why you should always 'know better' and give a size hint to your pool!
#define POOL_AUTOSIZE(TSIZE) ((TSIZE <= 128) ? 8192/TSIZE : (8192*2)/TSIZE)


// maximum bucket size for the global allocator
#define POOL_MAX_BUCKET_SIZE 1024




// dummy type for setting the dynamic_pool GC value
typedef bool pool_gc_t;
const pool_gc_t use_gc = (pool_gc_t)true;
const pool_gc_t no_gc = (pool_gc_t)false;


#ifdef NDEBUG // release?
    #define POOL_NDEBUG !ENABLE_POOL_DEBUGGING_RELEASE // is pool debugging disabled? pool:no-debug
    #define POOL_DEBUG ENABLE_POOL_DEBUGGING_RELEASE // is pool debugging enabled? pool:debug
#else
    #define POOL_NDEBUG !ENABLE_POOL_DEBUGGING_DEBUG // is pool debugging disabled? pool:no-debug
    #define POOL_DEBUG ENABLE_POOL_DEBUGGING_DEBUG // is pool debugging enabled? pool:debug
#endif


#define POOL_GUARDBYTES ((void*)0xCAFED00D)



/**
 * Our memory pool is a SIZE-based pool, meaning you specify the size
 * of a single object rather than its type.
 * If the pool has no more available elements, alloc() returns NULL.
 * You can return objects to the pool by using dealloc().
 * You can clear the state of the pool with clear() - this will make the
 * allocator forget its free list and reset the number of available objects.
 */
class pool
{
public:
#if MSC_VERSION
    #pragma warning(push)
    #pragma warning(disable:4200)
#endif
    // Size of the actual pool object is 20 bytes,
    // the allocated data follows right after the pool.
    // We could use ushort for available/freed, but that would
    // misalign the data. This favors newer x86/x64 model performance.

    unsigned sizeOf;    // sizeof each element
    unsigned available; // number of allocations not yet handed out
    unsigned freed;     // number in the free list


    struct node
    {
    #if POOL_NDEBUG
        union {
            node* next;     // free list next pointer
            char data[];    // the actual data
        };
    #else
        // We need the debug facility to detect writes into deleted handles.
        // Writing into a handle after deleting it would corrupt the freed
        // list and segfault. This lets us catch that.
        node* next;     // free list next pointer
        union {
            void* guard;    // guard bytes
            char data[];    // the actual data
        };
    #endif
    };


    node* list;     // free list pointer
    char* end;      // pointer to the end of the buffer (first invalid address)
    char buffer[];  // the actual buffer contained in this object
#if MSC_VERSION
    #pragma warning(pop)
#endif

public:

    /**
     * Creates a new pool object. The pool is a variable-size object
     * allocated as sizeof(pool) + size_of*capacity, rounded up to a multiple of 8192
     * @param capacity Number of objects to reserve space for
     * @param size_of Size of each element
     * @return A new pool object
     */
    static pool* create(unsigned capacity, unsigned size_of)
    {
        #if POOL_DEBUG
            size_of += sizeof(node*); // debug mode needs room for an extra ptr
        #else
            if(size_of < sizeof(node*))
                size_of = sizeof(node*); // ensure the minimum size of node* for release mode
        #endif
        if(size_of % 4 != 0) // have to align to a 4-byte boundary?
            size_of = size_of + 4 - size_of % 4;

        // calculate size of this structure + buffer size
        int allocSize = sizeof(pool) + size_of * capacity;
        // align alloc size to an 8192-byte chunk
        allocSize = (allocSize % 8192) ? allocSize + 8192 - (allocSize % 8192) : allocSize;

        // now calculate the 'usable' buffer size for our end pointer
        int usableBuffer = (allocSize - sizeof(pool));  // pool struct is not usable space
        usableBuffer -= usableBuffer % size_of;         // remove any misaligned bytes from the end

        pool* p = (pool*)malloc(allocSize);
        p->sizeOf = size_of;
        p->available = usableBuffer / size_of;          // number of nodes available
        p->freed = 0;
        p->list = NULL;
        p->end = p->buffer + usableBuffer;
        return p;
    }

    /**
     * Clears the current state of the pool and resets
     * all the variables of the pool
     */
    inline void clear()
    {
        available = (end - buffer) / sizeOf;
        freed = 0;
        list = NULL;
    }

    /**
     * Destroys the pool
     */
    inline static void destroy(pool* p)
    {
        free(p);
    }

    // unsafe version for dynamic_pool
    inline void* _alloc_new()
    {
        #if POOL_NDEBUG
            return (buffer + sizeOf * --available);
        #else
            return ((node*)(buffer + sizeOf * --available))->data;
        #endif
    }

    // unsafe version for dynamic_pool
    inline void* _alloc_freed() // gets from the freed list
    {
        --freed;
        #if POOL_NDEBUG
            void* ptr = (void*)list;
        #else
            ENSURE(list->guard == POOL_GUARDBYTES && "Invalid write to dealloc()-ed pointer detected!");
            void* ptr = list->data;
        #endif
        list = list->next; // unshift the linked list
        return ptr;
    }

    // unsafe version for dynamic_pool
    inline void _dealloc(void* ptr)
    {
        ++freed;
        #if POOL_NDEBUG
            ((node*)ptr)->next = list;  // shift the linked list
            list = (node*)ptr;          // store the beginning of the linked list
        #else
            node* n = (node*)((char*)ptr - sizeof(void*));
            n->guard = POOL_GUARDBYTES;
            n->next = list;
            list = n;
        #endif
    }

    /**
     * @return A new object from this pool, or NULL if no more handles are available.
     */
    inline void* alloc() // gets a new item
    {
        if(freed)
        {
            --freed;
            #if POOL_NDEBUG
                void* ptr = (void*)list;
            #else
                ENSURE(list->guard == POOL_GUARDBYTES && "Invalid write to dealloc()-ed pointer detected!");
                void* ptr = list->data;
            #endif
            list = list->next; // unshift the linked list
            return ptr;
        }
        else if(available)
        {
            #if POOL_NDEBUG
                return (buffer + sizeOf * --available);
            #else
                return ((node*)(buffer + sizeOf * --available))->data;
            #endif
        }
        return NULL; // it's empty
    }

    /**
     * Deallocates a pointer
     * @param ptr Pointer
     */
    inline void dealloc(void* ptr) // puts into the freed list
    {
        ++freed;
        #if POOL_NDEBUG
            ((node*)ptr)->next = list;  // shift the linked list
            list = (node*)ptr;          // store the beginning of the linked list
        #else
            ENSURE(is_owner(ptr) && "Given pointer does not belong to this pool!");
            node* n = (node*)((char*)ptr - sizeof(void*));
            n->guard = POOL_GUARDBYTES;
            n->next = list;
            list = n;
        #endif
    }

    /**
     * @return TRUE if the pointer is in range of this pool
     */
    inline bool is_owner(void* ptr) const
    {
        return buffer <= ptr && ptr < end;
    }

    /**
     * @return Number of objects currently allocated
     */
    inline unsigned alloc_count() const
    {
        return ((end - buffer) / sizeOf) - (available + freed); // reserved - free
    }

    /**
     * @return Number of objects available to allocate
     */
    inline unsigned free_count() const
    {
        return available + freed;
    }

    /**
     * @return Number of objects reserved
     */
    inline unsigned reserve_count() const
    {
        return (end - buffer) / sizeOf;
    }

    /**
     * @return Number of bytes currently allocated
     */
    inline unsigned alloc_bytes() const
    {
        return (end - buffer) - (available + freed) * sizeOf; // reserved - free
    }

    /**
     * @return Number of bytes available to allocate
     */
    inline unsigned free_bytes() const
    {
        return (available + freed) * sizeOf;
    }

    /**
     * @return Number of bytes reserved
     */
    inline unsigned reserve_bytes() const
    {
        return end - buffer;
    }
};




/**
 * Dynamic pool is a memory pool that dynamically increases its size
 * to handle more and more requests whenever needed.
 *
 * @param GC [no_gc] Set to [use_gc] if you wish this dynamic_pool to trigger
 *           garbage collection when a pool* becomes empty
 */
template<pool_gc_t GC = no_gc> class dynamic_pool
{
public:
    pool** pools;
    unsigned pools_count;
    unsigned pools_capacity;
    unsigned pool_sizeOf;       // size in bytes of each element
    unsigned pool_sizehint;     // size hint for pool objects in number of elements


    /**
     * Creates a new pool and automatically calculates pool size (quite aggressively).
     */
    inline dynamic_pool(unsigned sizeOf) : pools(0), pools_count(0), pools_capacity(0),
        pool_sizeOf(sizeOf), pool_sizehint(POOL_AUTOSIZE(sizeOf))
    {
    }

    /**
     * Creates a new pool with the given pool size hint
     */
    inline dynamic_pool(unsigned sizeOf, unsigned poolSizeHint) : pools(0), pools_count(0), pools_capacity(0),
        pool_sizeOf(sizeOf), pool_sizehint(poolSizeHint)
    {
    }

    /**
     * Destroys the entire pool
     */
    inline ~dynamic_pool()
    {
        destroy();
    }

    /**
     * This function destroys all the pools in this dynamic_pool.
     * The container array for pool objects is also destroyed.
     */
    void destroy()
    {
        int i = pools_count;
        if(!i) return; // early return if no pools
        while(i)
            pool::destroy(pools[--i]);

        free(pools);
        pools = NULL;
        pools_count = 0;
        pools_capacity = 0;
    }

    /**
     * @return A new memory block of pool_sizeOf bytes
     */
    void* alloc() // gets a new handle
    {
        int i = pools_count;
        while(i) // backwards iteration
        {
            pool* p = pools[--i];
            if(p->freed) // any in the 'freed' list?
                return p->_alloc_freed();
            else if(p->available) // any in the buffer itself?
                return p->_alloc_new();
        }

        if(pools_count == pools_capacity)
        {
            pools_capacity += 4; // this is really bad
            pools = (pool**)realloc(pools, sizeof(pool*) * pools_capacity);
        }
        pool* p = pool::create(pool_sizehint, pool_sizeOf);
        pools[pools_count++] = p;
        return p->_alloc_new(); // this will definitely succeed
    }

    /**
     * Deallocates a pointer by returning it to the pool
     */
    void dealloc(void* ptr) // deletes an existing handle
    {
        int i = pools_count;
        while(i) // backwards iteration
        {
            pool* p = pools[--i];
            if(p->is_owner(ptr)) // belongs to this pool?
            {
                p->_dealloc(ptr); // good. put it there.

                // @note We need general garbage collection for the _global_pool objects.
                // Run garbage collection if and only if:
                // 1) GC is enabled for this template
                // 2) there is more than 1 pool
                // 3) the pool is now empty
                if(GC && pools_count != 1 && p->alloc_count() == 0)
                    erase_at(i);
                return;
            }
        }

        // on the global bucket_allocator this happens if you dealloc() from the wrong thread;
        // otherwise it's an invalid pointer or belongs to another pool
        ENSURE(false && "Pointer does not belong to this dynamic_pool!");
    }

    /**
     * Clears all the pools. (!) Does NOT free any memory (!) Pools have their max capacity restored!
     */
    void clear()
    {
        for(int i = pools_count; i; )
            pools[--i]->clear();
    }

    /**
     * Destroys pools that are empty
     */
    void clean_pools()
    {
        int i = pools_count;
        if(!i) return; // early return to avoid the free block at the end
        while(i)
        {
            pool* p = pools[--i];
            if(p->alloc_count() == 0)
            {
                erase_at(i);
                continue;
            }
        }
        if(pools_count == 0)
        {
            free(pools), pools = NULL;
            pools_capacity = 0;
        }
    }


private:
    void erase_at(int i) // erases the pool at the given index
    {
        pool** at = pools + i;
        pool::destroy(*at);
        if(int itemsToShift = pools_count - i - 1)
            memmove(at, at + 1, itemsToShift * sizeof(pool*)); // unshift the pools array
        --pools_count;
    }


public:
    /**
     * @return Number of currently allocated objects
     */
    unsigned alloc_count() const
    {
        unsigned count = 0;
        for(unsigned i = 0; i < pools_count; ++i)
            count += pools[i]->alloc_count();
        return count;
    }

    /**
     * @return Number of objects available for allocation
     */
    unsigned free_count() const
    {
        unsigned count = 0;
        for(unsigned i = 0; i < pools_count; ++i)
            count += pools[i]->free_count();
        return count;
    }

    /**
     * @return Total number of objects reserved
     */
    unsigned reserve_count() const
    {
        unsigned count = 0;
        for(unsigned i = 0; i < pools_count; ++i)
            count += pools[i]->reserve_count();
        return count;
    }

    /**
     * @return Number of bytes currently allocated
     */
    unsigned alloc_bytes() const
    {
        unsigned numBytes = 0;
        for(unsigned i = 0; i < pools_count; ++i)
            numBytes += pools[i]->alloc_bytes();
        return numBytes;
    }

    /**
     * @return Number of bytes available for allocation
     */
    unsigned free_bytes() const
    {
        unsigned numBytes = 0;
        for(unsigned i = 0; i < pools_count; ++i)
            numBytes += pools[i]->free_bytes();
        return numBytes;
    }

    /**
     * @return Number of bytes reserved for allocations
     */
    unsigned reserve_bytes() const
    {
        unsigned numBytes = 0;
        for(unsigned i = 0; i < pools_count; ++i)
            numBytes += pools[i]->reserve_bytes();
        return numBytes;
    }
};




/**
 * Global pools use garbage collection (automatic cleanup on low capacity)
 */
typedef dynamic_pool<use_gc> global_pool_t;




/**
 * Gets a Thread-Local-Storage pool suitable for the specified size request.
 * @note This only works for requestSize in range [1, 1024]; anything else is undefined behaviour. Use wisely.
 * @param requestSize Size of the memory request in bytes.
 * @return A Thread-Local-Storage pool for this size request.
 */
global_pool_t* _get_tls_pool(uint32_t requestSize);




/**
 * A bucket allocator is a special allocator that divides allocations into fixed-size memory pools.
 * These pools are thread-local, so an assertion failure is triggered if delete is called from the wrong thread.
 * @note This special allocator pools allocations of [4..1024] bytes
 * @note General vector allocator for STL
 */
template<class T> class bucket_allocator : public std::allocator<T>
{
public:
    typedef size_t      size_type;
    typedef ptrdiff_t   difference_type;
    typedef T*          pointer;
    typedef const T*    const_pointer;
    typedef T&          reference;
    typedef const T&    const_reference;
    typedef T           value_type;
    template<class X> struct rebind { typedef bucket_allocator<X> other; };

    inline bucket_allocator() throw() {}
    inline bucket_allocator(const bucket_allocator& a) throw() : std::allocator<T>(a) {}
    template<class X> inline bucket_allocator(const bucket_allocator<X>&) throw() {}
    inline ~bucket_allocator() throw() {}
    inline bucket_allocator select_on_container_copy_construction() const throw() { return *this; }

    inline pointer address(reference x) const throw() { return &x; }
    inline const_pointer address(const_reference x) const throw() { return &x; }
    inline size_type max_size() const throw() { return size_t(-1) / sizeof(T); }
    inline void construct(pointer p, const_reference v) { ::new(p) T(v); }
    inline void destroy(pointer p) { p->~T(); (void)p; }

    /**
     * @note This should be tested for all vectors:
     * @note The 1024-byte limit is deduced from MTuner profiling data. Everything over that seems to be irrelevant.
     */
    pointer allocate(size_type n)
    {
        const uint32_t requestSize = n * sizeof(T); // requested bytes
        if(requestSize <= POOL_MAX_BUCKET_SIZE) // default MAX: 1024 bytes
        {
            return (pointer)_get_tls_pool(requestSize)->alloc();
        }
        void* mem = malloc(requestSize);
        if(!mem)
        {
            throw std::bad_alloc();
        }
        return (pointer)mem;
    }

    void deallocate(pointer p, size_type n)
    {
        const uint32_t requestSize = n * sizeof(T); // requested bytes
        if(requestSize <= POOL_MAX_BUCKET_SIZE) // default MAX: 1024 bytes
        {
            return _get_tls_pool(requestSize)->dealloc(p);
        }
        free(p);
    }
};




#endif // INCLUDED_MEMORY_POOL
\ No newline at end of file
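Example use of this header: plug bucket_allocator into a standard container, or use dynamic_pool directly with an element size and a count hint. A sketch assuming the engine build environment (MSC_VERSION, ENSURE); the function name is hypothetical:

    #include "lib/allocators/memory_pool.h"
    #include <vector>

    void pool_example()
    {
        // requests of <= POOL_MAX_BUCKET_SIZE bytes are pooled; larger ones fall back to malloc/free
        std::vector<int, bucket_allocator<int> > v;
        for(int i = 0; i < 256; ++i) // 256 * sizeof(int) == 1024 bytes: still pooled
            v.push_back(i);

        dynamic_pool<no_gc> intpool(sizeof(int), 64); // 64-element size hint
        void* p = intpool.alloc();  // grows by adding pools instead of returning NULL
        intpool.dealloc(p);         // must come from this dynamic_pool (and this thread)
    }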
  • lib/allocators/tests/test_allocators.h

     
 #include "lib/allocators/dynarray.h"
 #include "lib/byte_order.h"
 
+#include "lib/ps_stl.h"
+#include "lib/timer.h" // for the performance measurements
+
 class TestAllocators : public CxxTest::TestSuite
 {
 public:
+
     void test_da()
     {
         DynArray da;
…
         TS_ASSERT_OK(da_set_size(&da, 1000));
         TS_ASSERT_OK(da_free(&da));
     }
+
+
+    void test_pool_gc()
+    {
+        static const int ITERS = 3000;
+        static void* pointers[ITERS];
+
+        dynamic_pool<use_gc> mempool(sizeof(int), 1024); // the pool we're testing
+
+        // try to allocate way over pool limits:
+        for(int i = 0; i < ITERS; ++i)
+        {
+            pointers[i] = mempool.alloc();
+        }
+
+        TS_ASSERT(mempool.pools_count > 1); // pools count should be more than 1
+
+        // now let's deallocate all the pointers
+        for(int i = ITERS; i != 0; )
+        {
+            mempool.dealloc(pointers[--i]);
+        }
+
+        TS_ASSERT(mempool.pools_count == 1); // pools count MUST be 1 now
+
+        printf("\npool_gc done.\n");
+    }
+
+
+    // @note If these don't crash, then the allocator works pretty well :)
+    void test_stl_containers()
+    {
+        ps::vector<int> vec;
+
+        // fill it up
+        for(int i = 0; i < 32; ++i)
+            vec.push_back(i);
+
+        // now we test afterwards to make sure reallocs didn't change stuff
+        for(int i = 0; i < 32; ++i)
+            TS_ASSERT(vec[i] == i);
+
+        ps::vector<int> vec2 = vec;
+        vec2 = vec;
+        ps::vector<int> vec3 = ps::vector<int>();
+        vec3 = ps::vector<int>();
+
+
+        // @note pretty much the same as the ps::vector test above
+
+        ps::map<int, int> map;
+        for(int i = 0; i < 32; ++i)
+            map[i] = i;
+        for(int i = 0; i < 32; ++i)
+            TS_ASSERT(map[i] == i);
+
+        ps::map<int, int> map2 = map;
+        map2 = map;
+        ps::map<int, int> map3 = ps::map<int, int>();
+        map3 = ps::map<int, int>();
+
+        printf("stl containers done.\n");
+    }
+
+
+
+
+#define PERF_ITERS 120000 // 120k iters
+#define FRAG_ITERS 40000 // 40k iters
+
+    struct int1x { int x; };
+    struct int2x { int x, y; };
+    struct int3x { int x, y, z; };
+
+
+    // @note Tests the random allocation speed of the stock allocator
+    void test_std_alloc_fragmented_DISABLED()
+    {
+        static int1x* pointers1[FRAG_ITERS];
+        static int2x* pointers2[FRAG_ITERS];
+        static int1x* pointers11[FRAG_ITERS];
+
+        double start = timer_Time();
+        for(int i = 0; i < FRAG_ITERS; ++i)
+        {
+            pointers1[i] = new int1x;
+            pointers2[i] = new int2x;
+            int3x* frag = new int3x;
+            delete frag;
+            pointers11[i] = new int1x;
+        }
+        for(int i = FRAG_ITERS; i != 0; )
+        {
+            --i;
+            delete pointers1[i];
+            delete pointers2[i];
+            delete pointers11[i];
+        }
+        double elapsed = timer_Time() - start;
+        printf("std_alloc fragmented:     %dms\n", int(elapsed*1000));
+    }
+
+    // @note Tests the growing speed of the stock allocator
+    void test_std_alloc_growshrink_DISABLED()
+    {
+        static int* pointers[PERF_ITERS]; // array for perf test pointers
+
+        double start = timer_Time();
+        for(int i = 0; i < PERF_ITERS; ++i)
+        {
+            pointers[i] = new int;
+        }
+        for(int i = PERF_ITERS; i != 0; )
+        {
+            delete pointers[--i];
+        }
+        double elapsed = timer_Time() - start;
+        printf("std_alloc growshrink:     %dms\n", int(elapsed*1000));
+    }
+
+
+    // @note Tests the temporary new/delete speed of the stock allocator
+    void test_std_alloc_temporaries_DISABLED()
+    {
+        double start = timer_Time();
+        for(int i = 0; i < PERF_ITERS; ++i)
+        {
+            volatile int* temp = new int;
+            *temp = 0;
+            delete temp;
+        }
+        double elapsed = timer_Time() - start;
+        printf("std_alloc temporaries:    %dms\n", int(elapsed*1000));
+    }
+
+
+
+
+    // @note Tests the random allocation speed of the bucket allocator
+    void test_tls_bucket_fragmented_DISABLED()
+    {
+        static int1x* pointers1[FRAG_ITERS];
+        static int2x* pointers2[FRAG_ITERS];
+        static int1x* pointers11[FRAG_ITERS];
+
+        bucket_allocator<int1x> first;
+        bucket_allocator<int2x> second;
+        bucket_allocator<int3x> third;
+
+        // @todo This doesn't exactly simulate fragmentation yet
+        double start = timer_Time();
+        for(int i = 0; i < FRAG_ITERS; ++i)
+        {
+            pointers1[i] = first.allocate(1);
+            pointers2[i] = second.allocate(1);
+            int3x* frag = third.allocate(1);
+            third.deallocate(frag, 1);
+            pointers11[i] = first.allocate(1);
+        }
+        for(int i = FRAG_ITERS; i != 0; )
+        {
+            --i;
+            first.deallocate(pointers1[i], 1);
+            second.deallocate(pointers2[i], 1);
+            first.deallocate(pointers11[i], 1);
+        }
+        double elapsed = timer_Time() - start;
+        printf("tls_bucket fragmented:    %dms\n", int(elapsed*1000));
+    }
+
+
+    // @note Tests the growing speed of the bucket allocator
+    void test_tls_bucket_growshrink_DISABLED()
+    {
+        static int* pointers[PERF_ITERS]; // array for perf test pointers
+        bucket_allocator<int> balloc;
+
+        double start = timer_Time();
+        for(int i = 0; i < PERF_ITERS; ++i)
+        {
+            pointers[i] = balloc.allocate(1);
+        }
+        for(int i = PERF_ITERS; i != 0; )
+        {
+            balloc.deallocate(pointers[--i], 1);
+        }
+        double elapsed = timer_Time() - start;
+        printf("tls_bucket growshrink:    %dms\n", int(elapsed*1000));
+    }
+
+
+    // @note Tests the temporary new/delete speed of the bucket allocator
+    void test_tls_bucket_temporaries_DISABLED()
+    {
+        bucket_allocator<int> balloc;
+
+        double start = timer_Time();
+        for(int i = 0; i < PERF_ITERS; ++i)
+        {
+            int* temp = balloc.allocate(1);
+            *temp = 0;
+            balloc.deallocate(temp, 1);
+        }
+        double elapsed = timer_Time() - start;
+        printf("tls_bucket temporaries:   %dms\n", int(elapsed*1000));
+    }
+
+
+
+
+    template<class Vector> void stress_vector()
+    {
+        Vector vec;
+        for(int i = 0; i < 32; ++i)
+            vec.push_back(i);
+        Vector vec2 = vec;
+        vec2 = vec;
+        Vector vec3 = Vector();
+        vec3 = Vector();
+    }
+    template<class String> void stress_string()
+    {
+        String str;
+        for(int i = 0; i < 32; ++i)
+            str.push_back('A');
+        String str2 = str;
+        str2 = str;
+        String str3 = String();
+        str3 = String();
+    }
+    template<class Map> void stress_map()
+    {
+        Map map;
+        for(int i = 0; i < 32; ++i)
+            map[i] = i;
+        Map map2 = map;
+        map2 = map;
+        Map map3 = Map();
+        map3 = Map();
+    }
+    template<class Set> void stress_set()
+    {
+        Set set;
+        for(int i = 0; i < 32; ++i)
+            set.insert(i);
+        Set set2 = set;
+        set2 = set;
+        Set set3 = Set();
+        set3 = Set();
+    }
+
+
+#define STRESS_ITERS 1000
+
+    void test_STD_STL_stresstest_DISABLED()
+    {
+        double start = timer_Time();
+        for(int i = 0; i < STRESS_ITERS; ++i)
+        {
+            stress_vector<std::vector<int> >();
+            stress_vector<std::list<int> >();
+            stress_string<std::string>();
+            stress_map<std::map<int, int> >();
+            stress_set<std::set<int> >();
+            stress_map<stl_unordered_map<int, int> >();
+            stress_set<stl_unordered_set<int> >();
+        }
+        double elapsed = timer_Time() - start;
+        printf("stress_test std::stl:     %dms\n", int(elapsed*1000));
+    }
+
+    void test_PS_STL_stresstest_DISABLED()
+    {
+        double start = timer_Time();
+        for(int i = 0; i < STRESS_ITERS; ++i)
+        {
+            stress_vector<ps::vector<int> >();
+            stress_vector<ps::list<int> >();
+            stress_string<ps::string>();
+            stress_map<ps::map<int, int> >();
+            stress_set<ps::set<int> >();
+            stress_map<ps::unordered_map<int, int> >();
+            stress_set<ps::unordered_set<int> >();
+        }
+        double elapsed = timer_Time() - start;
+        printf("stress_test ps::stl:      %dms\n", int(elapsed*1000));
+    }
+
 };
  • lib/ps_stl.h

     
/* Copyright (c) 2013 Wildfire Games
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#pragma once
#ifndef INCLUDED_PS_STL
#define INCLUDED_PS_STL

/**
 * @author Jorma Rebane
 * @date 2013.06.27
 * @note Pyrogenesis STL wrapper
 * @note This file contains STL container wrappers with custom allocators.
 * @note It's a quick fix to increase STL container performance.
 */

#include "allocators/memory_pool.h"

#include <vector>
#include <string> // this one gets used sooo much
#include <sstream>
#include <list>
#include <map>
#include <set>
#if HAVE_CPP0X // unordered variants only came out in C++11
    #include <unordered_map>
    #include <unordered_set>
    #define stl_hash std::hash
    #define stl_unordered_map std::unordered_map
    #define stl_unordered_set std::unordered_set
#else
    #include <boost/unordered_map.hpp>
    #include <boost/unordered_set.hpp>
    #define stl_hash boost::hash
    #define stl_unordered_map boost::unordered_map
    #define stl_unordered_set boost::unordered_set
#endif



#ifndef USE_POOL_ALLOCATORS     // you can define this as an override to enable/disable the pool allocator
#define USE_POOL_ALLOCATORS 1   // by default we enable the pool allocator
#endif


#if USE_POOL_ALLOCATORS
#define BUCKET_ALLOCATOR(...) , bucket_allocator< __VA_ARGS__ >
#else
#define BUCKET_ALLOCATOR(...)
#endif


#ifndef HAS_CPP11_TEMPLATE_ALIASING
// C++11 with clang 3.0+ or gcc 4.7+
#define HAS_CPP11_TEMPLATE_ALIASING (HAVE_CPP0X && (CLANG_VERSION >= 300 || GCC_VERSION >= 470))
#endif

namespace ps {




    /**
     * @note std::string wrappers with the bucket_allocator
     */
    typedef std::basic_string<char, std::char_traits<char> BUCKET_ALLOCATOR(char) > string;
    typedef std::basic_string<wchar_t, std::char_traits<wchar_t> BUCKET_ALLOCATOR(wchar_t) > wstring;
    typedef std::basic_stringstream<char, std::char_traits<char> BUCKET_ALLOCATOR(char) > stringstream;
    typedef std::basic_stringstream<wchar_t, std::char_traits<wchar_t> BUCKET_ALLOCATOR(wchar_t) > wstringstream;


    /**
     * String conversion for compatibility with std::string
     */
    inline string to_string(const std::string& s) { return string(s.c_str(), s.length()); }
    inline wstring to_string(const std::wstring& s) { return wstring(s.c_str(), s.length()); }


    /**
     * Unsafe re-casting. Be careful with this!!
     */
    inline const string& string_ref(const std::string& s) { return *(const string*)&s; }
    inline const wstring& string_ref(const std::wstring& s) { return *(const wstring*)&s; }




#if HAS_CPP11_TEMPLATE_ALIASING

    // alias must be named 'vector' to match the #else branch and ps::vector users
    template<class T>
        using vector = std::vector<T BUCKET_ALLOCATOR(T)>;


    template<class T>
        using list = std::list<T BUCKET_ALLOCATOR(T)>;


    template<class Key, class T, class Pred = std::less<Key>>
        using map = std::map<Key, T, Pred BUCKET_ALLOCATOR(std::pair<const Key, T>)>;


    template<class T, class Pred = std::less<T>>
        using set = std::set<T, Pred BUCKET_ALLOCATOR(T)>;


    template<class Key, class T, class Hash = stl_hash<Key>, class Pred = std::equal_to<Key>>
        using unordered_map = std::unordered_map<Key, T, Hash, Pred BUCKET_ALLOCATOR(std::pair<const Key, T>)>;


    template<class Key, class Hash = stl_hash<Key>, class Pred = std::equal_to<Key>>
        using unordered_set = std::unordered_set<Key, Hash, Pred BUCKET_ALLOCATOR(Key)>;

#else // !HAS_CPP11_TEMPLATE_ALIASING


    /**
     * Overrides std::vector and plugs in a bucket_allocator as the allocator.
     * @note The bucket_allocator allocates from global thread-local memory pools based on the chunk size.
     */
    template<class T> class vector
        :public std::vector<T BUCKET_ALLOCATOR(T) >
    {
    public:
        typedef std::vector<T BUCKET_ALLOCATOR(T) > base;

        inline vector() : base() {}
        inline explicit vector(size_t count) : base(count) {}
        inline vector(size_t count, const T& value) : base(count, value) {}
        template<class Iter> inline vector(Iter first, Iter last) : base(first, last) {}


        inline vector(const vector& right) : base(right) {}
        inline vector& operator=(const vector& right) { base::operator=(right); return *this; }
    #if HAVE_CPP0X
        // non-const rvalue references, otherwise std::move would silently copy
        inline vector(vector&& right) : base(std::move(right)) {}
        inline vector& operator=(vector&& right) { base::operator=(std::move(right)); return *this; }
    #endif
    };




    /**
     * Overrides std::list and plugs in a bucket_allocator as the allocator.
     * @note The bucket_allocator allocates from global thread-local memory pools based on the chunk size.
     */
    template<class T> class list
        :public std::list<T BUCKET_ALLOCATOR(T) >
    {
    public:
        typedef std::list<T BUCKET_ALLOCATOR(T) > base;

        inline list() : base() {}
        inline explicit list(size_t count) : base(count) {}
        inline list(size_t count, const T& value) : base(count, value) {}
        template<class Iter> inline list(Iter first, Iter last) : base(first, last) {}


        inline list(const list& right) : base(right) {}
        inline list& operator=(const list& right) { base::operator=(right); return *this; }
    #if HAVE_CPP0X
        inline list(list&& right) : base(std::move(right)) {}
        inline list& operator=(list&& right) { base::operator=(std::move(right)); return *this; }
    #endif
    };




    /**
     * Overrides std::map and plugs in a bucket_allocator as the allocator.
     * @note The bucket_allocator allocates from global thread-local memory pools based on the chunk size.
     */
    template<class Key, class T, class Pred = std::less<Key> > class map
        :public std::map<Key, T, Pred BUCKET_ALLOCATOR(std::pair<const Key, T>) >
    {
    public:
        typedef std::map<Key, T, Pred BUCKET_ALLOCATOR(std::pair<const Key, T>) > base;

        inline map() : base() {}
        inline explicit map(const Pred& pred) : base(pred) {}
        template<class Iter> inline map(Iter first, Iter last) : base(first, last) {}
        template<class Iter> inline map(Iter first, Iter last, const Pred& pred) : base(first, last, pred) {}


        inline map(const map& right) : base(right) {}
        inline map& operator=(const map& right) { base::operator=(right); return *this; }
    #if HAVE_CPP0X
        inline map(map&& right) : base(std::move(right)) {}
        inline map& operator=(map&& right) { base::operator=(std::move(right)); return *this; }
    #endif
    };




    /**
     * Overrides std::set and plugs in a bucket_allocator as the allocator.
     * @note The bucket_allocator allocates from global thread-local memory pools based on the chunk size.
     */
    template<class T, class Pred = std::less<T> > class set
        :public std::set<T, Pred BUCKET_ALLOCATOR(T) >
    {
    public:
        typedef std::set<T, Pred BUCKET_ALLOCATOR(T) > base;

        inline set() : base() {}
        inline explicit set(const Pred& pred) : base(pred) {}
        template<class Iter> inline set(Iter first, Iter last) : base(first, last) {}
        template<class Iter> inline set(Iter first, Iter last, const Pred& pred) : base(first, last, pred) {}


        inline set(const set& right) : base(right) {}
        inline set& operator=(const set& right) { base::operator=(right); return *this; }
    #if HAVE_CPP0X
        inline set(set&& right) : base(std::move(right)) {}
        inline set& operator=(set&& right) { base::operator=(std::move(right)); return *this; }
    #endif
    };




    /**
     * Overrides std::unordered_map and plugs in a bucket_allocator as the allocator.
     * @note The bucket_allocator allocates from global thread-local memory pools based on the chunk size.
     */
    template<class Key, class T, class Hash = stl_hash<Key>, class Pred = std::equal_to<Key> > class unordered_map
        :public stl_unordered_map<Key, T, Hash, Pred BUCKET_ALLOCATOR(std::pair<const Key, T>) >
    {
    public:
        typedef stl_unordered_map<Key, T, Hash, Pred BUCKET_ALLOCATOR(std::pair<const Key, T>) > base;

        inline unordered_map() : base() {}
        inline explicit unordered_map(size_t buckets) : base(buckets) {}
        inline unordered_map(size_t buckets, const Hash& hashArg) : base(buckets, hashArg) {}
        inline unordered_map(size_t buckets, const Hash& hashArg, const Pred& keyArg) : base(buckets, hashArg, keyArg) {}
        template<class Iter> inline unordered_map(Iter first, Iter last) : base(first, last) {}
        template<class Iter> inline unordered_map(Iter first, Iter last, size_t buckets) : base(first, last, buckets) {}
        template<class Iter> inline unordered_map(Iter first, Iter last, size_t buckets, const Hash& hashArg) : base(first, last, buckets, hashArg) {}
        template<class Iter> inline unordered_map(Iter first, Iter last, size_t buckets, const Hash& hashArg, const Pred& keyArg) : base(first, last, buckets, hashArg, keyArg) {}


        inline unordered_map(const unordered_map& right) : base(right) {}
        inline unordered_map& operator=(const unordered_map& right) { base::operator=(right); return *this; }
    #if HAVE_CPP0X
        inline unordered_map(unordered_map&& right) : base(std::move(right)) {}
        inline unordered_map& operator=(unordered_map&& right) { base::operator=(std::move(right)); return *this; }
    #endif
    };




    /**
     * Overrides std::unordered_set and plugs in a bucket_allocator as the allocator.
     * @note The bucket_allocator allocates from global thread-local memory pools based on the chunk size.
     */
    template<class Key, class Hash = stl_hash<Key>, class Pred = std::equal_to<Key> > class unordered_set
        :public stl_unordered_set<Key, Hash, Pred BUCKET_ALLOCATOR(Key) >
    {
    public:
        typedef stl_unordered_set<Key, Hash, Pred BUCKET_ALLOCATOR(Key) > base;

        inline unordered_set() : base() {}
        inline explicit unordered_set(size_t buckets) : base(buckets) {}
        inline unordered_set(size_t buckets, const Hash& hashArg) : base(buckets, hashArg) {}
        inline unordered_set(size_t buckets, const Hash& hashArg, const Pred& keyArg) : base(buckets, hashArg, keyArg) {}
        template<class Iter> inline unordered_set(Iter first, Iter last) : base(first, last) {}
        template<class Iter> inline unordered_set(Iter first, Iter last, size_t buckets) : base(first, last, buckets) {}
        template<class Iter> inline unordered_set(Iter first, Iter last, size_t buckets, const Hash& hashArg) : base(first, last, buckets, hashArg) {}
        template<class Iter> inline unordered_set(Iter first, Iter last, size_t buckets, const Hash& hashArg, const Pred& keyArg) : base(first, last, buckets, hashArg, keyArg) {}


        inline unordered_set(const unordered_set& right) : base(right) {}
        inline unordered_set& operator=(const unordered_set& right) { base::operator=(right); return *this; }
    #if HAVE_CPP0X
        inline unordered_set(unordered_set&& right) : base(std::move(right)) {}
        inline unordered_set& operator=(unordered_set&& right) { base::operator=(std::move(right)); return *this; }
    #endif
    };
#endif // HAS_CPP11_TEMPLATE_ALIASING




#undef BUCKET_ALLOCATOR



} // namespace ps

#endif // INCLUDED_PS_STL
\ No newline at end of file
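Because the allocator is part of the container type, the ps:: containers are drop-in replacements but not assignment-compatible with their std:: counterparts; conversion goes through an explicit copy such as ps::to_string. A hypothetical usage sketch, including the build-time opt-out:

    // #define USE_POOL_ALLOCATORS 0  // optional opt-out, before the first include
    #include "lib/ps_stl.h"

    void ps_stl_example()
    {
        ps::vector<int> ints;           // std::vector<int, bucket_allocator<int> >
        ints.push_back(42);

        ps::map<int, ps::string> names; // map nodes come from the thread-local pools
        names[1] = ps::string("one");

        std::string legacy("legacy");
        ps::string pooled = ps::to_string(legacy); // explicit copy across allocators
    }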
  • ps/CStr.h

     
 
 #include <string>
 #include "ps/utf16string.h"
+#include <lib/ps_stl.h>
 
 class CStr8;
 class CStrW;
…
 
     // CONSTRUCTORS
 
-    CStr() {}
-    CStr(const tchar* String) : std::tstring(String) {}
-    CStr(const tchar* String, size_t Length) : std::tstring(String, Length) {}
-    CStr(const std::tstring& String) : std::tstring(String) {}
+    inline CStr() {}
+    inline CStr(const tchar* String) : std::tstring(String) {}
+    inline CStr(const tchar* String, size_t Length) : std::tstring(String, Length) {}
+    inline CStr(const std::tstring& String) : std::tstring(String) {}
+    inline CStr(const ps::tstring& String) : std::tstring(String.c_str(), String.length()) {}
+#if HAVE_CPP0X
+    inline CStr(std::tstring&& rvalue) : std::tstring(std::move(rvalue)) {}
+    // have to do a full copy from ps:: strings, since the allocators don't match:
+    inline CStr(const ps::tstring&& rvalue) : std::tstring(rvalue.c_str(), rvalue.length()) {}
+#endif
 
     /**
      * Repeat: Named constructor, to avoid overload overload.
…
     size_t GetSerializedLength() const;
     u8* Serialize(u8* buffer) const;
     const u8* Deserialize(const u8* buffer, const u8* bufferend);
+
+#ifdef _UNICODE
+#define _unimemcmp wmemcmp
+#define _unimemicmp wcsncasecmp
+#else
+#define _unimemcmp memcmp
+#define _unimemicmp strncasecmp
+#endif
+
+    inline bool StartsWith(const CStr& str) const
+    {
+        return length() >= str.length() && _unimemcmp(c_str(), str.c_str(), str.length()) == 0;
+    }
+    // SIZE is the array extent, so the literal's length is SIZE - 1:
+    template<size_t SIZE> inline bool StartsWith(const tchar (&const_literal)[SIZE]) const
+    {
+        return length() >= (SIZE - 1) && _unimemcmp(c_str(), const_literal, SIZE - 1) == 0;
+    }
+
+    template<size_t SIZE> inline bool Equals(const tchar (&const_literal)[SIZE]) const
+    {
+        return length() == (SIZE - 1) &&
+            _unimemcmp(c_str(), const_literal, SIZE - 1) == 0;
+    }
+
+    template<size_t SIZE> inline bool EqualsIgnoreCase(const tchar (&const_literal)[SIZE]) const
+    {
+        return length() == (SIZE - 1) &&
+            _unimemicmp(c_str(), const_literal, SIZE - 1) == 0;
+    }
+
+#undef _unimemicmp
+#undef _unimemcmp
+
 };
 
+/**
+ * Basic string hashing for use with std::map
+ */
 static inline size_t hash_value(const CStr& s)
 {
     return s.GetHashCode();
 }
 
+
+
+
 #endif
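The new literal overloads deduce the string length from the array extent at compile time, so no strlen() is needed per call. A hypothetical example, assuming a narrow-string build (tchar == char):

    void cstr_example(const CStr8& path)
    {
        if(path.StartsWith("art/"))           // length 4 deduced from the literal
            printf("art asset\n");
        if(path.EqualsIgnoreCase("readme.txt"))
            printf("readme\n");
    }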