This Trac instance is not used for development anymore!

We migrated our development workflow to git and Gitea.
To test the future redirection, replace trac by ariadne in the page URL.

Changeset 9961 for ps


Ignore:
Timestamp:
08/04/11 19:11:16 (13 years ago)
Author:
Jan Wassenberg
Message:

cleanup and simplification of the really old h_mgr code.
replace array-of-pages data structure with demand-committed VM => allows simple allocation of new resources without having to search for the first free index. I suspect the cause of multiple reported bugs (refs #860, #899, #915) was a race. Most issues are fixed, but the pool's freelist also needs to be made thread-safe and lock-free. this has performance and 64-bit portability implications, so I'll leave it for later (Pool is due for some serious refactoring anyway).

please post a comment if this or similar issues persist.

Location:
ps/trunk/source/lib
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • ps/trunk/source/lib/allocators/pool.cpp

    r9423 r9961  
    7676    // otherwise the pool el_size setting.
    7777    const size_t el_size = p->el_size? p->el_size : Align<allocationAlignment>(size);
     78    ASSERT(el_size != 0);
    7879
    79     // note: this can never happen in pools with variable-sized elements
     80    // note: freelist is always empty in pools with variable-sized elements
    8081    // because they disallow pool_free.
    8182    void* el = mem_freelist_Detach(p->freelist);
    82     if(el)
    83         goto have_el;
    84 
    85     // alloc a new entry
     83    if(!el) // freelist empty, need to allocate a new entry
    8684    {
    8785        // expand, if necessary
     
    9391    }
    9492
    95 have_el:
    96     ENSURE(pool_contains(p, el));   // paranoia
     93    ASSERT(pool_contains(p, el));   // paranoia
    9794    return el;
    9895}
  • ps/trunk/source/lib/res/graphics/ogl_tex.cpp

    r9929 r9961  
    586586    //   workaround is that ogl_tex_set_* won't call us if the
    587587    //   same state values are being set (harmless anyway).
    588     int refs = h_get_refcnt(ht);
    589     if(refs > 1)
     588    intptr_t refs = h_get_refcnt(ht);
     589    if(refs > 1)
    590590        return; // don't complain
    591591
     
    910910        // note: tex_free is safe even if this OglTex was wrapped -
    911911        //       the Tex contains a mem handle.
    912         int refs = h_get_refcnt(ht);
     912        intptr_t refs = h_get_refcnt(ht);
    913913        if(refs == 1)
    914914        {
  • ps/trunk/source/lib/res/h_mgr.cpp

    r9462 r9961  
    3737#include "lib/fnv_hash.h"
    3838#include "lib/allocators/overrun_protector.h"
     39#include "lib/allocators/pool.h"
    3940#include "lib/module_init.h"
    40 
    41 
    42 static const size_t MAX_EXTANT_HANDLES = 10000;
     41#include "lib/sysdep/cpu.h" // cpu_CAS64
     42
     43
     44namespace ERR {
     45static const Status H_IDX_INVALID   = -120000;  // totally invalid
     46static const Status H_IDX_UNUSED    = -120001;  // beyond current cap
     47static const Status H_TAG_MISMATCH  = -120003;
     48static const Status H_TYPE_MISMATCH = -120004;
     49}
     50static const StatusDefinition hStatusDefinitions[] = {
     51    { ERR::H_IDX_INVALID,   L"Handle index completely out of bounds" },
     52    { ERR::H_IDX_UNUSED,    L"Handle index exceeds high-water mark" },
     53    { ERR::H_TAG_MISMATCH,  L"Handle tag mismatch (stale reference?)" },
     54    { ERR::H_TYPE_MISMATCH, L"Handle type mismatch" }
     55};
     56STATUS_ADD_DEFINITIONS(hStatusDefinitions);
     57
     58
    4359
    4460// rationale
     
    7086//  may be larger than the field type - only shift Handle vars!)
    7187
    72 // - tag (1-based) ensures the handle references a certain resource instance.
    73 //   (field width determines maximum unambiguous resource allocs)
    74 #define TAG_BITS 32
    75 const size_t TAG_SHIFT = 0;
    76 const u32 TAG_MASK = 0xFFFFFFFF;    // safer than (1 << 32) - 1
    77 
    7888// - index (0-based) of control block in our array.
    7989//   (field width determines maximum currently open handles)
    8090#define IDX_BITS 16
    81 const size_t IDX_SHIFT = 32;
    82 const u32 IDX_MASK = (1l << IDX_BITS) - 1;
     91static const u64 IDX_MASK = (1l << IDX_BITS) - 1;
     92
     93// - tag (1-based) ensures the handle references a certain resource instance.
     94//   (field width determines maximum unambiguous resource allocs)
     95typedef i64 Tag;    // matches cpu_CAS64 type
     96#define TAG_BITS 48
     97static const u64 TAG_MASK = 0xFFFFFFFF; // safer than (1 << 32) - 1
    8398
    8499// make sure both fields fit within a Handle variable
     
    88103// return the handle's index field (always non-negative).
    89104// no error checking!
    90 static inline u32 h_idx(const Handle h)
    91 {
    92     return (u32)((h >> IDX_SHIFT) & IDX_MASK) - 1;
     105static inline size_t h_idx(const Handle h)
     106{
     107    return (size_t)(h & IDX_MASK) - 1;
    93108}
    94109
    95110// return the handle's tag field.
    96111// no error checking!
    97 static inline u32 h_tag(const Handle h)
    98 {
    99     return (u32)((h >> TAG_SHIFT) & TAG_MASK);
     112static inline Tag h_tag(Handle h)
     113{
     114    return h >> IDX_BITS;
    100115}
    101116
    102117// build a handle from index and tag.
    103118// can't fail.
    104 static inline Handle handle(const u32 _idx, const u32 tag)
    105 {
    106     const u32 idx = _idx+1;
    107     ENSURE(idx <= IDX_MASK && tag <= TAG_MASK && "handle: idx or tag too big");
    108     // somewhat clunky, but be careful with the shift:
    109     // *_SHIFT may be larger than its field's type.
    110     Handle h_idx = idx & IDX_MASK; h_idx <<= IDX_SHIFT;
    111     Handle h_tag = tag & TAG_MASK; h_tag <<= TAG_SHIFT;
    112     Handle h = h_idx | h_tag;
     119static inline Handle handle(size_t idx, u64 tag)
     120{
     121    const size_t idxPlusOne = idx+1;
     122    ENSURE(idxPlusOne <= IDX_MASK);
     123    ENSURE((tag & IDX_MASK) == 0);
     124    Handle h = tag | idxPlusOne;
    113125    ENSURE(h > 0);
    114126    return h;
     
    120132//
    121133
    122 
    123 // determines maximum number of references to a resource.
    124 static const size_t REF_BITS  = 16;
    125 static const u32 REF_MAX = (1ul << REF_BITS)-1;
    126 
    127 static const size_t TYPE_BITS = 8;
    128 
    129 
    130 // chosen so that all current resource structs are covered,
    131 // and so sizeof(HDATA) is a power of 2 (for more efficient array access
    132 // and array page usage).
    133 static const size_t HDATA_USER_SIZE = 44+64;
     134// chosen so that all current resource structs are covered.
     135static const size_t HDATA_USER_SIZE = 100;
    134136
    135137
    136138struct HDATA
    137139{
     140    // we only need the tag, because it is trivial to compute
     141    // &HDATA from idx and vice versa. storing the entire handle
     142    // avoids needing to extract the tag field.
     143    Handle h;   // NB: will be overwritten by pool_free
     144
    138145    uintptr_t key;
    139146
    140     u32 tag  : TAG_BITS;
    141 
    142     // smaller bitfields combined into 1
    143     u32 refs : REF_BITS;
    144     u32 type_idx : TYPE_BITS;
     147    intptr_t refs;
     148
     149    // smaller bit fields combined into 1
    145150    // .. if set, do not actually release the resource (i.e. call dtor)
    146151    //    when the handle is h_free-d, regardless of the refcount.
     
    168173
    169174// max data array entries. compared to last_in_use => signed.
    170 static const ssize_t hdata_cap = 1ul << IDX_BITS;
    171 
    172 // allocate entries as needed so as not to waste memory
    173 // (hdata_cap may be large). deque-style array of pages
    174 // to balance locality, fragmentation, and waste.
    175 static const size_t PAGE_SIZE = 4096;
    176 static const size_t hdata_per_page = PAGE_SIZE / sizeof(HDATA);
    177 static const size_t num_pages = hdata_cap / hdata_per_page;
    178 static HDATA* pages[num_pages];
    179 
    180 // these must be signed, because there won't always be a valid
    181 // first or last element.
    182 static ssize_t first_free = -1;     // don't want to scan array every h_alloc
    183 static ssize_t last_in_use = -1;    // don't search unused entries
     175static const ssize_t hdata_cap = (1ul << IDX_BITS)/4;
     176
     177// pool of fixed-size elements allows O(1) alloc and free;
     178// there is a simple mapping between HDATA address and index.
     179static Pool hpool;
    184180
    185181
     
    188184
    189185
    190 // get a (possibly new) array entry; array is non-contiguous.
    191 //
    192 // fails (returns 0) if idx is out of bounds, or if accessing a new page
    193 // for the first time, and there's not enough memory to allocate it.
    194 //
    195 // also used by h_data, and alloc_idx to find a free entry.
    196 static HDATA* h_data_from_idx(const ssize_t idx)
    197 {
    198     // don't compare against last_in_use - this is called before allocating
    199     // new entries, and to check if the next (but possibly not yet valid)
    200     // entry is free. tag check protects against using unallocated entries.
    201     if(idx < 0 || idx >= hdata_cap)
    202         return 0;
    203     HDATA*& page = pages[idx / hdata_per_page];
    204     if(!page)
    205     {
    206         page = (HDATA*)calloc(1, PAGE_SIZE);
    207         if(!page)
    208             return 0;
    209 
    210         // Initialise all the VfsPath members
    211         for(size_t i = 0; i < hdata_per_page; ++i)
    212             new (&page[i].pathname) VfsPath;
    213     }
    214 
    215     // note: VC7.1 optimizes the divides to shift and mask.
    216 
    217     HDATA* hd = &page[idx % hdata_per_page];
     186// get a (possibly new) array entry.
     187//
     188// fails if idx is out of bounds.
     189static Status h_data_from_idx(ssize_t idx, HDATA*& hd)
     190{
     191    // don't check if idx is beyond the current high-water mark, because
     192    // we might be allocating a new entry. subsequent tag checks protect
     193    // against using unallocated entries.
     194    if(size_t(idx) >= hdata_cap)    // also detects negative idx
     195        WARN_RETURN(ERR::H_IDX_INVALID);
     196
     197    hd = (HDATA*)(hpool.da.base + idx*hpool.el_size);
    218198    hd->num_derefs++;
    219     return hd;
     199    return INFO::OK;
     200}
     201
     202static ssize_t h_idx_from_data(HDATA* hd)
     203{
     204    if(!pool_contains(&hpool, hd))
     205        WARN_RETURN(ERR::INVALID_POINTER);
     206    return (uintptr_t(hd) - uintptr_t(hpool.da.base))/hpool.el_size;
    220207}
    221208
     
    224211// only uses (and checks) the index field.
    225212// used by h_force_close (which must work regardless of tag).
    226 static inline HDATA* h_data_no_tag(const Handle h)
     213static inline Status h_data_no_tag(const Handle h, HDATA*& hd)
    227214{
    228215    ssize_t idx = (ssize_t)h_idx(h);
     216    RETURN_STATUS_IF_ERR(h_data_from_idx(idx, hd));
    229217    // need to verify it's in range - h_data_from_idx can only verify that
    230218    // it's < maximum allowable index.
    231     if(0 > idx || idx > last_in_use)
    232         return 0;
    233     return h_data_from_idx(idx);
     219    if(uintptr_t(hd) > uintptr_t(hpool.da.base)+hpool.da.pos)
     220        WARN_RETURN(ERR::H_IDX_UNUSED);
     221    return INFO::OK;
    234222}
    235223
     
    238226// also verifies the tag field.
    239227// used by functions callable for any handle type, e.g. h_filename.
    240 static inline HDATA* h_data_tag(const Handle h)
    241 {
    242     HDATA* hd = h_data_no_tag(h);
    243     if(!hd)
    244         return 0;
    245 
    246     // note: tag = 0 marks unused entries => is invalid
    247     u32 tag = h_tag(h);
    248     if(tag == 0 || tag != hd->tag)
    249         return 0;
    250 
    251     return hd;
     228static inline Status h_data_tag(Handle h, HDATA*& hd)
     229{
     230    RETURN_STATUS_IF_ERR(h_data_no_tag(h, hd));
     231
     232    if(h != hd->h)
     233    {
     234        debug_printf(L"h_mgr: expected handle %llx, got %llx\n", hd->h, h);
     235        WARN_RETURN(ERR::H_TAG_MISMATCH);
     236    }
     237
     238    return INFO::OK;
    252239}
    253240
     
    256243// also verifies the type.
    257244// used by most functions accessing handle data.
    258 static HDATA* h_data_tag_type(const Handle h, const H_Type type)
    259 {
    260     HDATA* hd = h_data_tag(h);
    261     if(!hd)
    262         return 0;
     245static Status h_data_tag_type(const Handle h, const H_Type type, HDATA*& hd)
     246{
     247    RETURN_STATUS_IF_ERR(h_data_tag(h, hd));
    263248
    264249    // h_alloc makes sure type isn't 0, so no need to check that here.
    265250    if(hd->type != type)
    266         return 0;
    267 
    268     return hd;
    269 }
    270 
    271 
    272 //-----------------------------------------------------------------------------
    273 
    274 // idx and hd are undefined if we fail.
    275 // called by h_alloc only.
    276 static Status alloc_idx(ssize_t& idx, HDATA*& hd)
    277 {
    278     // we already know the first free entry
    279     if(first_free != -1)
    280     {
    281         idx = first_free;
    282         hd = h_data_from_idx(idx);
    283     }
    284     // need to look for a free entry, or alloc another
    285     else
    286     {
    287         // look for an unused entry
    288         for(idx = 0; idx <= last_in_use; idx++)
    289         {
    290             hd = h_data_from_idx(idx);
    291             ENSURE(hd); // can't fail - idx is valid
    292 
    293             // found one - done
    294             if(!hd->tag)
    295                 goto have_idx;
    296         }
    297 
    298         // add another
    299         // .. too many already: IDX_BITS must be increased.
    300         if(last_in_use >= hdata_cap)
    301             WARN_RETURN(ERR::LIMIT);
    302         idx = last_in_use+1;    // just incrementing idx would start it at 1
    303         hd = h_data_from_idx(idx);
    304         if(!hd)
    305             WARN_RETURN(ERR::NO_MEM);
    306             // can't fail for any other reason - idx is checked above.
    307         {   // VC6 goto fix
    308         bool is_unused = !hd->tag;
    309         ENSURE(is_unused && "invalid last_in_use");
    310         }
    311 
    312 have_idx:;
    313     }
    314 
    315     // check if next entry is free
    316     HDATA* hd2 = h_data_from_idx(idx+1);
    317     if(hd2 && hd2->tag == 0)
    318         first_free = idx+1;
    319     else
    320         first_free = -1;
    321 
    322     if(idx > last_in_use)
    323         last_in_use = idx;
    324 
    325     return INFO::OK;
    326 }
    327 
    328 
    329 static Status free_idx(ssize_t idx)
    330 {
    331     if(first_free == -1 || idx < first_free)
    332         first_free = idx;
     251    {
     252        debug_printf(L"h_mgr: expected type %ws, got %ws\n", hd->type->name, type->name);
     253        WARN_RETURN(ERR::H_TYPE_MISMATCH);
     254    }
     255
    333256    return INFO::OK;
    334257}
     
    373296    {
    374297        ssize_t idx = it->second;
    375         HDATA* hd = h_data_from_idx(idx);
    376         // found match
    377         if(hd && hd->type == type && hd->key == key)
    378         {
    379             if(remove_option == KEY_REMOVE)
    380                 key2idx->erase(it);
    381             ret = handle(idx, hd->tag);
    382             break;
    383         }
     298        HDATA* hd;
     299        if(h_data_from_idx(idx, hd) != INFO::OK)
     300            continue;
     301        if(hd->type != type || hd->key != key)
     302            continue;
     303
     304        // found a match
     305        if(remove_option == KEY_REMOVE)
     306            key2idx->erase(it);
     307        ret = hd->h;
     308        break;
    384309    }
    385310
     
    453378
    454379
    455 static u32 gen_tag()
    456 {
    457     static u32 tag;
    458     if(++tag >= TAG_MASK)
    459     {
    460         debug_warn(L"h_mgr: tag overflow - allocations are no longer unique."\
    461             L"may not notice stale handle reuse. increase TAG_BITS.");
    462         tag = 1;
    463     }
    464     return tag;
     380static Tag gen_tag()
     381{
     382    static volatile Tag tag;
     383    for(;;)
     384    {
     385        const Tag oldTag = tag;
     386        const Tag newTag = oldTag + (1ull << IDX_BITS);
     387        // it's not easy to detect overflow, because compilers
     388        // are allowed to assume it'll never happen. however,
     389        // pow(2, 64-IDX_BITS) is "enough" anyway.
     390        if(cpu_CAS64(&tag, oldTag, newTag))
     391            return newTag;
     392    }
    465393}
    466394
     
    476404        return 0;
    477405
    478     HDATA* hd = h_data_tag_type(h, type);
    479     // too many references - increase REF_BITS
    480     if(hd->refs == REF_MAX)
    481         WARN_RETURN(ERR::LIMIT);
    482 
    483     hd->refs++;
     406    HDATA* hd;
     407    RETURN_STATUS_IF_ERR(h_data_tag_type(h, type, hd)); // h_find means this won't fail
     408
     409    cpu_AtomicAdd(&hd->refs, 1);
    484410
    485411    // we are reactivating a closed but cached handle.
     
    490416    if(hd->refs == 1)
    491417    {
    492         const u32 tag = gen_tag();
    493         hd->tag = tag;
     418        const Tag tag = gen_tag();
    494419        h = handle(h_idx(h), tag);  // can't fail
     420        hd->h = h;
    495421    }
    496422
     
    530456static Handle alloc_new_handle(H_Type type, const PIVFS& vfs, const VfsPath& pathname, uintptr_t key, size_t flags, va_list* init_args)
    531457{
    532     ssize_t idx;
    533     HDATA* hd;
    534     RETURN_STATUS_IF_ERR(alloc_idx(idx, hd));
     458    HDATA* hd = (HDATA*)pool_alloc(&hpool, 0);
     459    if(!hd)
     460        WARN_RETURN(ERR::NO_MEM);
     461    new(&hd->pathname) VfsPath;
     462
     463    ssize_t idx = h_idx_from_data(hd);
     464    RETURN_STATUS_IF_ERR(idx);
    535465
    536466    // (don't want to do this before the add-reference exit,
    537467    // so as not to waste tags for often allocated handles.)
    538     const u32 tag = gen_tag();
     468    const Tag tag = gen_tag();
    539469    Handle h = handle(idx, tag);    // can't fail.
    540470
    541     hd->tag  = tag;
     471    hd->h = h;
    542472    hd->key  = key;
    543473    hd->type = type;
     
    594524//-----------------------------------------------------------------------------
    595525
    596 // currently cannot fail.
    597 static Status h_free_idx(ssize_t idx, HDATA* hd)
    598 {
    599     // only decrement if refcount not already 0.
    600     if(hd->refs > 0)
    601         hd->refs--;
     526static void h_free_hd(HDATA* hd)
     527{
     528    for(;;)
     529    {
     530        const intptr_t refs = hd->refs;
     531        if(refs <= 0)   // skip decrement
     532            break;
     533        if(cpu_CAS(&hd->refs, refs, refs-1))    // success
     534            break;
     535    }
    602536
    603537    // still references open or caching requests it stays - do not release.
    604538    if(hd->refs > 0 || hd->keep_open)
    605         return INFO::OK;
     539        return;
    606540
    607541    // actually release the resource (call dtor, free control block).
     
    631565    hd->pathname.~VfsPath();    // FIXME: ugly hack, but necessary to reclaim memory
    632566    memset(hd, 0, sizeof(*hd));
    633     new (&hd->pathname) VfsPath;    // FIXME too: necessary because otherwise it'll break if we reuse this page
    634 
    635     free_idx(idx);
    636 
    637     return INFO::OK;
     567    pool_free(&hpool, hd);
    638568}
    639569
     
    641571Status h_free(Handle& h, H_Type type)
    642572{
    643     ssize_t idx = h_idx(h);
    644     HDATA* hd = h_data_tag_type(h, type);
     573    // 0-initialized or an error code; don't complain because this
     574    // happens often and is harmless.
     575    if(h <= 0)
     576        return INFO::OK;
    645577
    646578    // wipe out the handle to prevent reuse but keep a copy for below.
     
    648580    h = 0;
    649581
    650     // h was invalid
    651     if(!hd)
    652     {
    653         // 0-initialized or an error code; don't complain because this
    654         // happens often and is harmless.
    655         if(h_copy <= 0)
    656             return INFO::OK;
    657         // this was a valid handle but was probably freed in the meantime.
    658         // complain because this probably indicates a bug somewhere.
    659         WARN_RETURN(ERR::INVALID_HANDLE);
    660     }
    661 
    662     return h_free_idx(idx, hd);
     582    HDATA* hd;
     583    RETURN_STATUS_IF_ERR(h_data_tag_type(h_copy, type, hd));
     584
     585    h_free_hd(hd);
     586    return INFO::OK;
    663587}
    664588
     
    669593void* h_user_data(const Handle h, const H_Type type)
    670594{
    671     HDATA* hd = h_data_tag_type(h, type);
    672     if(!hd)
     595    HDATA* hd;
     596    if(h_data_tag_type(h, type, hd) != INFO::OK)
    673597        return 0;
    674598
     
    689613    // don't require type check: should be useable for any handle,
    690614    // even if the caller doesn't know its type.
    691     HDATA* hd = h_data_tag(h);
    692     if(!hd)
    693     {
    694         DEBUG_WARN_ERR(ERR::LOGIC);
     615    HDATA* hd;
     616    if(h_data_tag(h, hd) != INFO::OK)
    695617        return VfsPath();
    696     }
    697618    return hd->pathname;
    698619}
     
    708629    // order (the parent resource may be reloaded first, and load the child,
    709630    // whose original data would leak).
    710     for(ssize_t i = 0; i <= last_in_use; i++)
    711     {
    712         HDATA* hd = h_data_from_idx(i);
    713         if(!hd || hd->key != key || hd->disallow_reload)
     631    for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size))
     632    {
     633        if(hd->key == 0 || hd->key != key || hd->disallow_reload)
    714634            continue;
    715635        hd->type->dtor(hd->user);
     
    719639
    720640    // now reload all affected handles
    721     for(ssize_t i = 0; i <= last_in_use; i++)
    722     {
    723         HDATA* hd = h_data_from_idx(i);
    724         if(!hd || hd->key != key || hd->disallow_reload)
     641    size_t i = 0;
     642    for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size), i++)
     643    {
     644        if(hd->key == 0 || hd->key != key || hd->disallow_reload)
    725645            continue;
    726646
    727         Handle h = handle(i, hd->tag);
    728 
    729         Status err = hd->type->reload(hd->user, vfs, hd->pathname, h);
     647        Status err = hd->type->reload(hd->user, vfs, hd->pathname, hd->h);
    730648        // don't stop if an error is encountered - try to reload them all.
    731649        if(err < 0)
    732650        {
    733             h_free(h, hd->type);
     651            h_free(hd->h, hd->type);
    734652            if(ret == 0)    // don't overwrite first error
    735653                ret = err;
     
    759677{
    760678    // require valid index; ignore tag; type checked below.
    761     HDATA* hd = h_data_no_tag(h);
    762     if(!hd || hd->type != type)
    763         WARN_RETURN(ERR::INVALID_HANDLE);
    764     u32 idx = h_idx(h);
     679    HDATA* hd;
     680    RETURN_STATUS_IF_ERR(h_data_no_tag(h, hd));
     681    if(hd->type != type)
     682        WARN_RETURN(ERR::H_TYPE_MISMATCH);
    765683    hd->keep_open = 0;
    766684    hd->refs = 0;
    767     return h_free_idx(idx, hd);
     685    h_free_hd(hd);
     686    return INFO::OK;
    768687}
    769688
     
    777696void h_add_ref(Handle h)
    778697{
    779     HDATA* hd = h_data_tag(h);
    780     if(!hd)
    781     {
    782         DEBUG_WARN_ERR(ERR::LOGIC); // invalid handle
     698    HDATA* hd;
     699    if(h_data_tag(h, hd) != INFO::OK)
    783700        return;
    784     }
    785701
    786702    ENSURE(hd->refs);   // if there are no refs, how did the caller manage to keep a Handle?!
    787     hd->refs++;
     703    cpu_AtomicAdd(&hd->refs, 1);
    788704}
    789705
     
    795711// necessary (always wrapping objects in Handles is excessive), we
    796712// provide access to the internal reference count.
    797 int h_get_refcnt(Handle h)
    798 {
    799     HDATA* hd = h_data_tag(h);
    800     if(!hd)
    801         WARN_RETURN(ERR::INVALID_HANDLE);
     713intptr_t h_get_refcnt(Handle h)
     714{
     715    HDATA* hd;
     716    RETURN_STATUS_IF_ERR(h_data_tag(h, hd));
    802717
    803718    ENSURE(hd->refs);   // if there are no refs, how did the caller manage to keep a Handle?!
     
    810725static Status Init()
    811726{
     727    RETURN_STATUS_IF_ERR(pool_create(&hpool, hdata_cap*sizeof(HDATA), sizeof(HDATA)));
    812728    return INFO::OK;
    813729}
     
    818734
    819735    // forcibly close all open handles
    820     for(ssize_t i = 0; i <= last_in_use; i++)
    821     {
    822         HDATA* hd = h_data_from_idx(i);
    823         // can't fail - i is in bounds by definition, and
    824         // each HDATA entry has already been allocated.
    825         if(!hd)
    826         {
    827             DEBUG_WARN_ERR(ERR::LOGIC); // h_data_from_idx failed - why?!
    828             continue;
    829         }
    830 
     736    for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size))
     737    {
    831738        // it's already been freed; don't free again so that this
    832739        // doesn't look like an error.
    833         if(!hd->tag)
     740        if(hd->key == 0)
    834741            continue;
    835742
     
    838745        hd->refs = 0;
    839746
    840         h_free_idx(i, hd);  // currently cannot fail
    841     }
    842 
    843     // free HDATA array
    844     for(size_t j = 0; j < num_pages; j++)
    845     {
    846         if (pages[j])
    847             for(size_t k = 0; k < hdata_per_page; ++k)
    848                 pages[j][k].pathname.~VfsPath();    // FIXME: ugly hack, but necessary to reclaim memory
    849         free(pages[j]);
    850         pages[j] = 0;
    851     }
     747        h_free_hd(hd);
     748    }
     749
     750    pool_destroy(&hpool);
    852751}
    853752
  • ps/trunk/source/lib/res/h_mgr.h

    r9410 r9961  
    7575
    76761) choose a name for the resource, used to represent all resources
    77 of this type. we will call ours "Res1"; all below occurences of this
     77of this type. we will call ours "Res1"; all below occurrences of this
    7878must be replaced with the actual name (exact spelling).
    7979why? the vtbl builder defines its functions as e.g. Res1_reload;
     
    8989Note that all control blocks are stored in fixed-size slots
    9090(HDATA_USER_SIZE bytes), so squeezing the size of your data doesn't
    91 necessarily help unless yours is the largest. However, if the filename
    92 passed to h_alloc fits within the remaining space, it is stored there
    93 (thus saving time+memory). Therefore, do not be extravagant with space.
     91help unless yours is the largest.
    9492
    95933) build its vtbl:
     
    356354
    357355
    358 // resource scope
    359 // used together with flags (e.g. in mem), so no separate type
    360 /*
    361 enum
    362 {
    363     RES_TEMP   = 1,
    364     RES_LEVEL  = 2,
    365     RES_STATIC = 4
    366 };
    367 
    368 #define RES_SCOPE_MASK 7
    369 */
    370356
    371357// h_alloc flags
     
    389375// allocate a new handle.
    390376// if key is 0, or a (key, type) handle doesn't exist,
    391 //   the first free entry is used.
     377//   some free entry is used.
    392378// otherwise, a handle to the existing object is returned,
    393379//   and HDATA.size != 0.
     
    438424// necessary (always wrapping objects in Handles is excessive), we
    439425// provide access to the internal reference count.
    440 extern int h_get_refcnt(Handle h);
     426extern intptr_t h_get_refcnt(Handle h);
    441427
    442428#endif  // #ifndef INCLUDED_H_MGR
  • ps/trunk/source/lib/status.h

    r9871 r9961  
    148148
    14914912     res
     150  00CC h_mgr
    150151  01CC tex
    151152  02CC ogl_shader
Note: See TracChangeset for help on using the changeset viewer.