- Timestamp:
- 08/04/11 19:11:16 (13 years ago)
- Location:
- ps/trunk/source/lib
- Files:
-
- 5 edited
-
allocators/pool.cpp (modified) (2 diffs)
-
res/graphics/ogl_tex.cpp (modified) (2 diffs)
-
res/h_mgr.cpp (modified) (28 diffs)
-
res/h_mgr.h (modified) (5 diffs)
-
status.h (modified) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
ps/trunk/source/lib/allocators/pool.cpp
r9423 r9961 76 76 // otherwise the pool el_size setting. 77 77 const size_t el_size = p->el_size? p->el_size : Align<allocationAlignment>(size); 78 ASSERT(el_size != 0); 78 79 79 // note: this can never happen in pools with variable-sized elements80 // note: freelist is always empty in pools with variable-sized elements 80 81 // because they disallow pool_free. 81 82 void* el = mem_freelist_Detach(p->freelist); 82 if(el) 83 goto have_el; 84 85 // alloc a new entry 83 if(!el) // freelist empty, need to allocate a new entry 86 84 { 87 85 // expand, if necessary … … 93 91 } 94 92 95 have_el: 96 ENSURE(pool_contains(p, el)); // paranoia 93 ASSERT(pool_contains(p, el)); // paranoia 97 94 return el; 98 95 } -
ps/trunk/source/lib/res/graphics/ogl_tex.cpp
r9929 r9961 586 586 // workaround is that ogl_tex_set_* won't call us if the 587 587 // same state values are being set (harmless anyway). 588 int refs = h_get_refcnt(ht);589 if( refs> 1)588 intptr_t refs = h_get_refcnt(ht); 589 if(refs > 1) 590 590 return; // don't complain 591 591 … … 910 910 // note: tex_free is safe even if this OglTex was wrapped - 911 911 // the Tex contains a mem handle. 912 int refs = h_get_refcnt(ht);912 intptr_t refs = h_get_refcnt(ht); 913 913 if(refs == 1) 914 914 { -
ps/trunk/source/lib/res/h_mgr.cpp
r9462 r9961 37 37 #include "lib/fnv_hash.h" 38 38 #include "lib/allocators/overrun_protector.h" 39 #include "lib/allocators/pool.h" 39 40 #include "lib/module_init.h" 40 41 42 static const size_t MAX_EXTANT_HANDLES = 10000; 41 #include "lib/sysdep/cpu.h" // cpu_CAS64 42 43 44 namespace ERR { 45 static const Status H_IDX_INVALID = -120000; // totally invalid 46 static const Status H_IDX_UNUSED = -120001; // beyond current cap 47 static const Status H_TAG_MISMATCH = -120003; 48 static const Status H_TYPE_MISMATCH = -120004; 49 } 50 static const StatusDefinition hStatusDefinitions[] = { 51 { ERR::H_IDX_INVALID, L"Handle index completely out of bounds" }, 52 { ERR::H_IDX_UNUSED, L"Handle index exceeds high-water mark" }, 53 { ERR::H_TAG_MISMATCH, L"Handle tag mismatch (stale reference?)" }, 54 { ERR::H_TYPE_MISMATCH, L"Handle type mismatch" } 55 }; 56 STATUS_ADD_DEFINITIONS(hStatusDefinitions); 57 58 43 59 44 60 // rationale … … 70 86 // may be larger than the field type - only shift Handle vars!) 71 87 72 // - tag (1-based) ensures the handle references a certain resource instance.73 // (field width determines maximum unambiguous resource allocs)74 #define TAG_BITS 3275 const size_t TAG_SHIFT = 0;76 const u32 TAG_MASK = 0xFFFFFFFF; // safer than (1 << 32) - 177 78 88 // - index (0-based) of control block in our array. 79 89 // (field width determines maximum currently open handles) 80 90 #define IDX_BITS 16 81 const size_t IDX_SHIFT = 32; 82 const u32 IDX_MASK = (1l << IDX_BITS) - 1; 91 static const u64 IDX_MASK = (1l << IDX_BITS) - 1; 92 93 // - tag (1-based) ensures the handle references a certain resource instance. 94 // (field width determines maximum unambiguous resource allocs) 95 typedef i64 Tag; // matches cpu_CAS64 type 96 #define TAG_BITS 48 97 static const u64 TAG_MASK = 0xFFFFFFFF; // safer than (1 << 32) - 1 83 98 84 99 // make sure both fields fit within a Handle variable … … 88 103 // return the handle's index field (always non-negative). 
89 104 // no error checking! 90 static inline u32h_idx(const Handle h)91 { 92 return ( u32)((h >> IDX_SHIFT)& IDX_MASK) - 1;105 static inline size_t h_idx(const Handle h) 106 { 107 return (size_t)(h & IDX_MASK) - 1; 93 108 } 94 109 95 110 // return the handle's tag field. 96 111 // no error checking! 97 static inline u32 h_tag(constHandle h)98 { 99 return (u32)((h >> TAG_SHIFT) & TAG_MASK);112 static inline Tag h_tag(Handle h) 113 { 114 return h >> IDX_BITS; 100 115 } 101 116 102 117 // build a handle from index and tag. 103 118 // can't fail. 104 static inline Handle handle(const u32 _idx, const u32 tag) 105 { 106 const u32 idx = _idx+1; 107 ENSURE(idx <= IDX_MASK && tag <= TAG_MASK && "handle: idx or tag too big"); 108 // somewhat clunky, but be careful with the shift: 109 // *_SHIFT may be larger than its field's type. 110 Handle h_idx = idx & IDX_MASK; h_idx <<= IDX_SHIFT; 111 Handle h_tag = tag & TAG_MASK; h_tag <<= TAG_SHIFT; 112 Handle h = h_idx | h_tag; 119 static inline Handle handle(size_t idx, u64 tag) 120 { 121 const size_t idxPlusOne = idx+1; 122 ENSURE(idxPlusOne <= IDX_MASK); 123 ENSURE((tag & IDX_MASK) == 0); 124 Handle h = tag | idxPlusOne; 113 125 ENSURE(h > 0); 114 126 return h; … … 120 132 // 121 133 122 123 // determines maximum number of references to a resource. 124 static const size_t REF_BITS = 16; 125 static const u32 REF_MAX = (1ul << REF_BITS)-1; 126 127 static const size_t TYPE_BITS = 8; 128 129 130 // chosen so that all current resource structs are covered, 131 // and so sizeof(HDATA) is a power of 2 (for more efficient array access 132 // and array page usage). 133 static const size_t HDATA_USER_SIZE = 44+64; 134 // chosen so that all current resource structs are covered. 135 static const size_t HDATA_USER_SIZE = 100; 134 136 135 137 136 138 struct HDATA 137 139 { 140 // we only need the tag, because it is trivial to compute 141 // &HDATA from idx and vice versa. 
storing the entire handle 142 // avoids needing to extract the tag field. 143 Handle h; // NB: will be overwritten by pool_free 144 138 145 uintptr_t key; 139 146 140 u32 tag : TAG_BITS; 141 142 // smaller bitfields combined into 1 143 u32 refs : REF_BITS; 144 u32 type_idx : TYPE_BITS; 147 intptr_t refs; 148 149 // smaller bit fields combined into 1 145 150 // .. if set, do not actually release the resource (i.e. call dtor) 146 151 // when the handle is h_free-d, regardless of the refcount. … … 168 173 169 174 // max data array entries. compared to last_in_use => signed. 170 static const ssize_t hdata_cap = 1ul << IDX_BITS; 171 172 // allocate entries as needed so as not to waste memory 173 // (hdata_cap may be large). deque-style array of pages 174 // to balance locality, fragmentation, and waste. 175 static const size_t PAGE_SIZE = 4096; 176 static const size_t hdata_per_page = PAGE_SIZE / sizeof(HDATA); 177 static const size_t num_pages = hdata_cap / hdata_per_page; 178 static HDATA* pages[num_pages]; 179 180 // these must be signed, because there won't always be a valid 181 // first or last element. 182 static ssize_t first_free = -1; // don't want to scan array every h_alloc 183 static ssize_t last_in_use = -1; // don't search unused entries 175 static const ssize_t hdata_cap = (1ul << IDX_BITS)/4; 176 177 // pool of fixed-size elements allows O(1) alloc and free; 178 // there is a simple mapping between HDATA address and index. 179 static Pool hpool; 184 180 185 181 … … 188 184 189 185 190 // get a (possibly new) array entry; array is non-contiguous. 191 // 192 // fails (returns 0) if idx is out of bounds, or if accessing a new page 193 // for the first time, and there's not enough memory to allocate it. 194 // 195 // also used by h_data, and alloc_idx to find a free entry. 
196 static HDATA* h_data_from_idx(const ssize_t idx) 197 { 198 // don't compare against last_in_use - this is called before allocating 199 // new entries, and to check if the next (but possibly not yet valid) 200 // entry is free. tag check protects against using unallocated entries. 201 if(idx < 0 || idx >= hdata_cap) 202 return 0; 203 HDATA*& page = pages[idx / hdata_per_page]; 204 if(!page) 205 { 206 page = (HDATA*)calloc(1, PAGE_SIZE); 207 if(!page) 208 return 0; 209 210 // Initialise all the VfsPath members 211 for(size_t i = 0; i < hdata_per_page; ++i) 212 new (&page[i].pathname) VfsPath; 213 } 214 215 // note: VC7.1 optimizes the divides to shift and mask. 216 217 HDATA* hd = &page[idx % hdata_per_page]; 186 // get a (possibly new) array entry. 187 // 188 // fails if idx is out of bounds. 189 static Status h_data_from_idx(ssize_t idx, HDATA*& hd) 190 { 191 // don't check if idx is beyond the current high-water mark, because 192 // we might be allocating a new entry. subsequent tag checks protect 193 // against using unallocated entries. 194 if(size_t(idx) >= hdata_cap) // also detects negative idx 195 WARN_RETURN(ERR::H_IDX_INVALID); 196 197 hd = (HDATA*)(hpool.da.base + idx*hpool.el_size); 218 198 hd->num_derefs++; 219 return hd; 199 return INFO::OK; 200 } 201 202 static ssize_t h_idx_from_data(HDATA* hd) 203 { 204 if(!pool_contains(&hpool, hd)) 205 WARN_RETURN(ERR::INVALID_POINTER); 206 return (uintptr_t(hd) - uintptr_t(hpool.da.base))/hpool.el_size; 220 207 } 221 208 … … 224 211 // only uses (and checks) the index field. 225 212 // used by h_force_close (which must work regardless of tag). 226 static inline HDATA* h_data_no_tag(const Handle h)213 static inline Status h_data_no_tag(const Handle h, HDATA*& hd) 227 214 { 228 215 ssize_t idx = (ssize_t)h_idx(h); 216 RETURN_STATUS_IF_ERR(h_data_from_idx(idx, hd)); 229 217 // need to verify it's in range - h_data_from_idx can only verify that 230 218 // it's < maximum allowable index. 
231 if( 0 > idx || idx > last_in_use)232 return 0;233 return h_data_from_idx(idx);219 if(uintptr_t(hd) > uintptr_t(hpool.da.base)+hpool.da.pos) 220 WARN_RETURN(ERR::H_IDX_UNUSED); 221 return INFO::OK; 234 222 } 235 223 … … 238 226 // also verifies the tag field. 239 227 // used by functions callable for any handle type, e.g. h_filename. 240 static inline HDATA* h_data_tag(const Handle h) 241 { 242 HDATA* hd = h_data_no_tag(h); 243 if(!hd) 244 return 0; 245 246 // note: tag = 0 marks unused entries => is invalid 247 u32 tag = h_tag(h); 248 if(tag == 0 || tag != hd->tag) 249 return 0; 250 251 return hd; 228 static inline Status h_data_tag(Handle h, HDATA*& hd) 229 { 230 RETURN_STATUS_IF_ERR(h_data_no_tag(h, hd)); 231 232 if(h != hd->h) 233 { 234 debug_printf(L"h_mgr: expected handle %llx, got %llx\n", hd->h, h); 235 WARN_RETURN(ERR::H_TAG_MISMATCH); 236 } 237 238 return INFO::OK; 252 239 } 253 240 … … 256 243 // also verifies the type. 257 244 // used by most functions accessing handle data. 258 static HDATA* h_data_tag_type(const Handle h, const H_Type type) 259 { 260 HDATA* hd = h_data_tag(h); 261 if(!hd) 262 return 0; 245 static Status h_data_tag_type(const Handle h, const H_Type type, HDATA*& hd) 246 { 247 RETURN_STATUS_IF_ERR(h_data_tag(h, hd)); 263 248 264 249 // h_alloc makes sure type isn't 0, so no need to check that here. 265 250 if(hd->type != type) 266 return 0; 267 268 return hd; 269 } 270 271 272 //----------------------------------------------------------------------------- 273 274 // idx and hd are undefined if we fail. 275 // called by h_alloc only. 
276 static Status alloc_idx(ssize_t& idx, HDATA*& hd) 277 { 278 // we already know the first free entry 279 if(first_free != -1) 280 { 281 idx = first_free; 282 hd = h_data_from_idx(idx); 283 } 284 // need to look for a free entry, or alloc another 285 else 286 { 287 // look for an unused entry 288 for(idx = 0; idx <= last_in_use; idx++) 289 { 290 hd = h_data_from_idx(idx); 291 ENSURE(hd); // can't fail - idx is valid 292 293 // found one - done 294 if(!hd->tag) 295 goto have_idx; 296 } 297 298 // add another 299 // .. too many already: IDX_BITS must be increased. 300 if(last_in_use >= hdata_cap) 301 WARN_RETURN(ERR::LIMIT); 302 idx = last_in_use+1; // just incrementing idx would start it at 1 303 hd = h_data_from_idx(idx); 304 if(!hd) 305 WARN_RETURN(ERR::NO_MEM); 306 // can't fail for any other reason - idx is checked above. 307 { // VC6 goto fix 308 bool is_unused = !hd->tag; 309 ENSURE(is_unused && "invalid last_in_use"); 310 } 311 312 have_idx:; 313 } 314 315 // check if next entry is free 316 HDATA* hd2 = h_data_from_idx(idx+1); 317 if(hd2 && hd2->tag == 0) 318 first_free = idx+1; 319 else 320 first_free = -1; 321 322 if(idx > last_in_use) 323 last_in_use = idx; 324 325 return INFO::OK; 326 } 327 328 329 static Status free_idx(ssize_t idx) 330 { 331 if(first_free == -1 || idx < first_free) 332 first_free = idx; 251 { 252 debug_printf(L"h_mgr: expected type %ws, got %ws\n", hd->type->name, type->name); 253 WARN_RETURN(ERR::H_TYPE_MISMATCH); 254 } 255 333 256 return INFO::OK; 334 257 } … … 373 296 { 374 297 ssize_t idx = it->second; 375 HDATA* hd = h_data_from_idx(idx); 376 // found match 377 if(hd && hd->type == type && hd->key == key) 378 { 379 if(remove_option == KEY_REMOVE) 380 key2idx->erase(it); 381 ret = handle(idx, hd->tag); 382 break; 383 } 298 HDATA* hd; 299 if(h_data_from_idx(idx, hd) != INFO::OK) 300 continue; 301 if(hd->type != type || hd->key != key) 302 continue; 303 304 // found a match 305 if(remove_option == KEY_REMOVE) 306 key2idx->erase(it); 
307 ret = hd->h; 308 break; 384 309 } 385 310 … … 453 378 454 379 455 static u32 gen_tag() 456 { 457 static u32 tag; 458 if(++tag >= TAG_MASK) 459 { 460 debug_warn(L"h_mgr: tag overflow - allocations are no longer unique."\ 461 L"may not notice stale handle reuse. increase TAG_BITS."); 462 tag = 1; 463 } 464 return tag; 380 static Tag gen_tag() 381 { 382 static volatile Tag tag; 383 for(;;) 384 { 385 const Tag oldTag = tag; 386 const Tag newTag = oldTag + (1ull << IDX_BITS); 387 // it's not easy to detect overflow, because compilers 388 // are allowed to assume it'll never happen. however, 389 // pow(2, 64-IDX_BITS) is "enough" anyway. 390 if(cpu_CAS64(&tag, oldTag, newTag)) 391 return newTag; 392 } 465 393 } 466 394 … … 476 404 return 0; 477 405 478 HDATA* hd = h_data_tag_type(h, type); 479 // too many references - increase REF_BITS 480 if(hd->refs == REF_MAX) 481 WARN_RETURN(ERR::LIMIT); 482 483 hd->refs++; 406 HDATA* hd; 407 RETURN_STATUS_IF_ERR(h_data_tag_type(h, type, hd)); // h_find means this won't fail 408 409 cpu_AtomicAdd(&hd->refs, 1); 484 410 485 411 // we are reactivating a closed but cached handle. … … 490 416 if(hd->refs == 1) 491 417 { 492 const u32 tag = gen_tag(); 493 hd->tag = tag; 418 const Tag tag = gen_tag(); 494 419 h = handle(h_idx(h), tag); // can't fail 420 hd->h = h; 495 421 } 496 422 … … 530 456 static Handle alloc_new_handle(H_Type type, const PIVFS& vfs, const VfsPath& pathname, uintptr_t key, size_t flags, va_list* init_args) 531 457 { 532 ssize_t idx; 533 HDATA* hd; 534 RETURN_STATUS_IF_ERR(alloc_idx(idx, hd)); 458 HDATA* hd = (HDATA*)pool_alloc(&hpool, 0); 459 if(!hd) 460 WARN_RETURN(ERR::NO_MEM); 461 new(&hd->pathname) VfsPath; 462 463 ssize_t idx = h_idx_from_data(hd); 464 RETURN_STATUS_IF_ERR(idx); 535 465 536 466 // (don't want to do this before the add-reference exit, 537 467 // so as not to waste tags for often allocated handles.) 
538 const u32tag = gen_tag();468 const Tag tag = gen_tag(); 539 469 Handle h = handle(idx, tag); // can't fail. 540 470 541 hd-> tag = tag;471 hd->h = h; 542 472 hd->key = key; 543 473 hd->type = type; … … 594 524 //----------------------------------------------------------------------------- 595 525 596 // currently cannot fail. 597 static Status h_free_idx(ssize_t idx, HDATA* hd) 598 { 599 // only decrement if refcount not already 0. 600 if(hd->refs > 0) 601 hd->refs--; 526 static void h_free_hd(HDATA* hd) 527 { 528 for(;;) 529 { 530 const intptr_t refs = hd->refs; 531 if(refs <= 0) // skip decrement 532 break; 533 if(cpu_CAS(&hd->refs, refs, refs-1)) // success 534 break; 535 } 602 536 603 537 // still references open or caching requests it stays - do not release. 604 538 if(hd->refs > 0 || hd->keep_open) 605 return INFO::OK;539 return; 606 540 607 541 // actually release the resource (call dtor, free control block). … … 631 565 hd->pathname.~VfsPath(); // FIXME: ugly hack, but necessary to reclaim memory 632 566 memset(hd, 0, sizeof(*hd)); 633 new (&hd->pathname) VfsPath; // FIXME too: necessary because otherwise it'll break if we reuse this page 634 635 free_idx(idx); 636 637 return INFO::OK; 567 pool_free(&hpool, hd); 638 568 } 639 569 … … 641 571 Status h_free(Handle& h, H_Type type) 642 572 { 643 ssize_t idx = h_idx(h); 644 HDATA* hd = h_data_tag_type(h, type); 573 // 0-initialized or an error code; don't complain because this 574 // happens often and is harmless. 575 if(h <= 0) 576 return INFO::OK; 645 577 646 578 // wipe out the handle to prevent reuse but keep a copy for below. … … 648 580 h = 0; 649 581 650 // h was invalid 651 if(!hd) 652 { 653 // 0-initialized or an error code; don't complain because this 654 // happens often and is harmless. 655 if(h_copy <= 0) 656 return INFO::OK; 657 // this was a valid handle but was probably freed in the meantime. 658 // complain because this probably indicates a bug somewhere. 
659 WARN_RETURN(ERR::INVALID_HANDLE); 660 } 661 662 return h_free_idx(idx, hd); 582 HDATA* hd; 583 RETURN_STATUS_IF_ERR(h_data_tag_type(h_copy, type, hd)); 584 585 h_free_hd(hd); 586 return INFO::OK; 663 587 } 664 588 … … 669 593 void* h_user_data(const Handle h, const H_Type type) 670 594 { 671 HDATA* hd = h_data_tag_type(h, type);672 if( !hd)595 HDATA* hd; 596 if(h_data_tag_type(h, type, hd) != INFO::OK) 673 597 return 0; 674 598 … … 689 613 // don't require type check: should be useable for any handle, 690 614 // even if the caller doesn't know its type. 691 HDATA* hd = h_data_tag(h); 692 if(!hd) 693 { 694 DEBUG_WARN_ERR(ERR::LOGIC); 615 HDATA* hd; 616 if(h_data_tag(h, hd) != INFO::OK) 695 617 return VfsPath(); 696 }697 618 return hd->pathname; 698 619 } … … 708 629 // order (the parent resource may be reloaded first, and load the child, 709 630 // whose original data would leak). 710 for(ssize_t i = 0; i <= last_in_use; i++) 711 { 712 HDATA* hd = h_data_from_idx(i); 713 if(!hd || hd->key != key || hd->disallow_reload) 631 for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size)) 632 { 633 if(hd->key == 0 || hd->key != key || hd->disallow_reload) 714 634 continue; 715 635 hd->type->dtor(hd->user); … … 719 639 720 640 // now reload all affected handles 721 for(ssize_t i = 0; i <= last_in_use; i++)722 {723 HDATA* hd = h_data_from_idx(i);724 if( !hd|| hd->key != key || hd->disallow_reload)641 size_t i = 0; 642 for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size), i++) 643 { 644 if(hd->key == 0 || hd->key != key || hd->disallow_reload) 725 645 continue; 726 646 727 Handle h = handle(i, hd->tag); 728 729 Status err = hd->type->reload(hd->user, vfs, hd->pathname, h); 647 Status err = hd->type->reload(hd->user, vfs, hd->pathname, hd->h); 730 648 // don't stop if an error is encountered - try to reload them all. 
731 649 if(err < 0) 732 650 { 733 h_free(h , hd->type);651 h_free(hd->h, hd->type); 734 652 if(ret == 0) // don't overwrite first error 735 653 ret = err; … … 759 677 { 760 678 // require valid index; ignore tag; type checked below. 761 HDATA* hd = h_data_no_tag(h);762 if(!hd || hd->type != type)763 WARN_RETURN(ERR::INVALID_HANDLE);764 u32 idx = h_idx(h);679 HDATA* hd; 680 RETURN_STATUS_IF_ERR(h_data_no_tag(h, hd)); 681 if(hd->type != type) 682 WARN_RETURN(ERR::H_TYPE_MISMATCH); 765 683 hd->keep_open = 0; 766 684 hd->refs = 0; 767 return h_free_idx(idx, hd); 685 h_free_hd(hd); 686 return INFO::OK; 768 687 } 769 688 … … 777 696 void h_add_ref(Handle h) 778 697 { 779 HDATA* hd = h_data_tag(h); 780 if(!hd) 781 { 782 DEBUG_WARN_ERR(ERR::LOGIC); // invalid handle 698 HDATA* hd; 699 if(h_data_tag(h, hd) != INFO::OK) 783 700 return; 784 }785 701 786 702 ENSURE(hd->refs); // if there are no refs, how did the caller manage to keep a Handle?! 787 hd->refs++;703 cpu_AtomicAdd(&hd->refs, 1); 788 704 } 789 705 … … 795 711 // necessary (always wrapping objects in Handles is excessive), we 796 712 // provide access to the internal reference count. 797 int h_get_refcnt(Handle h) 798 { 799 HDATA* hd = h_data_tag(h); 800 if(!hd) 801 WARN_RETURN(ERR::INVALID_HANDLE); 713 intptr_t h_get_refcnt(Handle h) 714 { 715 HDATA* hd; 716 RETURN_STATUS_IF_ERR(h_data_tag(h, hd)); 802 717 803 718 ENSURE(hd->refs); // if there are no refs, how did the caller manage to keep a Handle?! … … 810 725 static Status Init() 811 726 { 727 RETURN_STATUS_IF_ERR(pool_create(&hpool, hdata_cap*sizeof(HDATA), sizeof(HDATA))); 812 728 return INFO::OK; 813 729 } … … 818 734 819 735 // forcibly close all open handles 820 for(ssize_t i = 0; i <= last_in_use; i++) 821 { 822 HDATA* hd = h_data_from_idx(i); 823 // can't fail - i is in bounds by definition, and 824 // each HDATA entry has already been allocated. 825 if(!hd) 826 { 827 DEBUG_WARN_ERR(ERR::LOGIC); // h_data_from_idx failed - why?! 
828 continue; 829 } 830 736 for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size)) 737 { 831 738 // it's already been freed; don't free again so that this 832 739 // doesn't look like an error. 833 if( !hd->tag)740 if(hd->key == 0) 834 741 continue; 835 742 … … 838 745 hd->refs = 0; 839 746 840 h_free_idx(i, hd); // currently cannot fail 841 } 842 843 // free HDATA array 844 for(size_t j = 0; j < num_pages; j++) 845 { 846 if (pages[j]) 847 for(size_t k = 0; k < hdata_per_page; ++k) 848 pages[j][k].pathname.~VfsPath(); // FIXME: ugly hack, but necessary to reclaim memory 849 free(pages[j]); 850 pages[j] = 0; 851 } 747 h_free_hd(hd); 748 } 749 750 pool_destroy(&hpool); 852 751 } 853 752 -
ps/trunk/source/lib/res/h_mgr.h
r9410 r9961 75 75 76 76 1) choose a name for the resource, used to represent all resources 77 of this type. we will call ours "Res1"; all below occur ences of this77 of this type. we will call ours "Res1"; all below occurrences of this 78 78 must be replaced with the actual name (exact spelling). 79 79 why? the vtbl builder defines its functions as e.g. Res1_reload; … … 89 89 Note that all control blocks are stored in fixed-size slots 90 90 (HDATA_USER_SIZE bytes), so squeezing the size of your data doesn't 91 necessarily help unless yours is the largest. However, if the filename 92 passed to h_alloc fits within the remaining space, it is stored there 93 (thus saving time+memory). Therefore, do not be extravagant with space. 91 help unless yours is the largest. 94 92 95 93 3) build its vtbl: … … 356 354 357 355 358 // resource scope359 // used together with flags (e.g. in mem), so no separate type360 /*361 enum362 {363 RES_TEMP = 1,364 RES_LEVEL = 2,365 RES_STATIC = 4366 };367 368 #define RES_SCOPE_MASK 7369 */370 356 371 357 // h_alloc flags … … 389 375 // allocate a new handle. 390 376 // if key is 0, or a (key, type) handle doesn't exist, 391 // the firstfree entry is used.377 // some free entry is used. 392 378 // otherwise, a handle to the existing object is returned, 393 379 // and HDATA.size != 0. … … 438 424 // necessary (always wrapping objects in Handles is excessive), we 439 425 // provide access to the internal reference count. 440 extern int h_get_refcnt(Handle h);426 extern intptr_t h_get_refcnt(Handle h); 441 427 442 428 #endif // #ifndef INCLUDED_H_MGR -
ps/trunk/source/lib/status.h
r9871 r9961 148 148 149 149 12 res 150 00CC h_mgr 150 151 01CC tex 151 152 02CC ogl_shader
Note:
See TracChangeset
for help on using the changeset viewer.
