- Timestamp:
- 08/21/11 13:00:09 (13 years ago)
- Location:
- ps/trunk/source/lib
- Files: 6 added, 16 edited
alignment.h (modified) (1 diff)
-
allocators/allocator_adapters.h (added)
-
allocators/allocator_policies.h (added)
-
allocators/arena.h (added)
-
allocators/dynarray.cpp (modified) (9 diffs)
-
allocators/dynarray.h (modified) (2 diffs)
-
allocators/overrun_protector.h (modified) (5 diffs)
-
allocators/pool.cpp (modified) (2 diffs)
-
allocators/pool.h (modified) (3 diffs)
-
allocators/tests/test_allocators.h (modified) (1 diff)
-
allocators/unique_range.cpp (modified) (3 diffs)
-
allocators/unique_range.h (modified) (2 diffs)
-
bits.h (modified) (1 diff)
-
debug.cpp (modified) (3 diffs)
-
debug.h (modified) (1 diff)
-
pch/pch_warnings.h (modified) (1 diff)
-
sysdep/os/unix/uvm.cpp (added)
-
sysdep/os/win/wnuma.cpp (modified) (4 diffs)
-
sysdep/os/win/wposix/wmman.cpp (modified) (4 diffs)
-
sysdep/os/win/wposix/wmman.h (modified) (2 diffs)
-
sysdep/os/win/wvm.cpp (added)
-
sysdep/vm.h (added)
Legend:
- Unmodified
- Added
- Removed
-
ps/trunk/source/lib/alignment.h
r9875 r10051 66 66 // 67 67 68 static const size_t allocationAlignment = ARCH_AMD64? 16 : 8;68 static const size_t allocationAlignment = 16; 69 69 70 70 static const size_t KiB = size_t(1) << 10; -
ps/trunk/source/lib/allocators/dynarray.cpp
r10024 r10051 29 29 30 30 #include "lib/alignment.h" 31 #include "lib/ allocators/page_aligned.h"31 #include "lib/sysdep/vm.h" 32 32 33 33 … … 40 40 const size_t cur_size = da->cur_size; 41 41 const size_t pos = da->pos; 42 const int prot = da->prot;43 42 44 43 // note: this happens if max_size == 0 … … 53 52 if(pos > cur_size || pos > max_size_pa) 54 53 WARN_RETURN(ERR::_5); 55 if(prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC))56 WARN_RETURN(ERR::_6);57 54 58 55 return INFO::OK; … … 64 61 Status da_alloc(DynArray* da, size_t max_size) 65 62 { 63 ENSURE(max_size != 0); 66 64 const size_t max_size_pa = Align<pageSize>(max_size); 67 65 68 u8* p = 0;69 if( max_size_pa) // (avoid mmap failure)70 RETURN_STATUS_IF_ERR(mem_Reserve(max_size_pa, &p));66 u8* p = (u8*)vm::ReserveAddressSpace(max_size_pa); 67 if(!p) 68 return ERR::NO_MEM; // NOWARN (already done in vm) 71 69 72 70 da->base = p; … … 74 72 da->cur_size = 0; 75 73 da->cur_size_pa = 0; 76 da->prot = PROT_READ|PROT_WRITE;77 74 da->pos = 0; 78 75 CHECK_DA(da); … … 85 82 CHECK_DA(da); 86 83 87 u8* p = da->base; 88 size_t size_pa = da->max_size_pa; 84 vm::ReleaseAddressSpace(da->base, da->max_size_pa); 89 85 90 86 // wipe out the DynArray for safety 91 // (must be done here because mem_Release may fail)92 87 memset(da, 0, sizeof(*da)); 93 88 94 if(size_pa)95 RETURN_STATUS_IF_ERR(mem_Release(p, size_pa));96 89 return INFO::OK; 97 90 } … … 114 107 115 108 u8* end = da->base + cur_size_pa; 109 bool ok = true; 116 110 // expanding 117 111 if(size_delta_pa > 0) 118 RETURN_STATUS_IF_ERR(mem_Commit(end, size_delta_pa, da->prot));112 ok = vm::Commit(uintptr_t(end), size_delta_pa); 119 113 // shrinking 120 114 else if(size_delta_pa < 0) 121 RETURN_STATUS_IF_ERR(mem_Decommit(end+size_delta_pa, -size_delta_pa));115 ok = vm::Decommit(uintptr_t(end+size_delta_pa), -size_delta_pa); 122 116 // else: no change in page count, e.g. 
if going from size=1 to 2 123 117 // (we don't want mem_* to have to handle size=0) … … 126 120 da->cur_size_pa = new_size_pa; 127 121 CHECK_DA(da); 128 return INFO::OK;122 return ok? INFO::OK : ERR::FAIL; 129 123 } 130 124 … … 139 133 140 134 141 Status da_set_prot(DynArray* da, int prot)142 {143 CHECK_DA(da);144 145 da->prot = prot;146 RETURN_STATUS_IF_ERR(mem_Protect(da->base, da->cur_size_pa, prot));147 148 CHECK_DA(da);149 return INFO::OK;150 }151 152 153 135 Status da_append(DynArray* da, const void* data, size_t size) 154 136 { -
ps/trunk/source/lib/allocators/dynarray.h
r10024 r10051 43 43 size_t cur_size; /// committed 44 44 size_t cur_size_pa; 45 46 /**47 * mprotect flags applied to newly committed pages48 **/49 int prot;50 45 51 46 size_t pos; … … 99 94 100 95 /** 101 * change access rights of the array memory.102 *103 * used to implement write-protection. affects the currently committed104 * pages as well as all subsequently added pages.105 *106 * @param da DynArray.107 * @param prot a combination of the PROT_* values used with mprotect.108 * @return Status.109 **/110 LIB_API Status da_set_prot(DynArray* da, int prot);111 112 /**113 96 * "write" to array, i.e. copy from the given buffer. 114 97 * -
ps/trunk/source/lib/allocators/overrun_protector.h
r9361 r10051 25 25 26 26 #include "lib/config2.h" // CONFIG2_ALLOCATORS_OVERRUN_PROTECTION 27 #include "lib/ allocators/page_aligned.h"27 #include "lib/sysdep/vm.h" 28 28 29 29 /** … … 55 55 public: 56 56 OverrunProtector() 57 : object(new( page_aligned_alloc(sizeof(T))) T())57 : object(new(vm::Allocate(sizeof(T))) T()) 58 58 { 59 59 lock(); … … 64 64 unlock(); 65 65 object->~T(); // call dtor (since we used placement new) 66 page_aligned_free(object, sizeof(T));66 vm::Free(object, sizeof(T)); 67 67 } 68 68 … … 76 76 { 77 77 #if CONFIG2_ALLOCATORS_OVERRUN_PROTECTION 78 mprotect(object, sizeof(T), PROT_NONE);78 vm::Protect(object, sizeof(T), PROT_NONE); 79 79 #endif 80 80 } … … 84 84 { 85 85 #if CONFIG2_ALLOCATORS_OVERRUN_PROTECTION 86 mprotect(object, sizeof(T), PROT_READ|PROT_WRITE);86 vm::Protect(object, sizeof(T), PROT_READ|PROT_WRITE); 87 87 #endif 88 88 } -
ps/trunk/source/lib/allocators/pool.cpp
r9961 r10051 1 /* Copyright (c) 201 0Wildfire Games1 /* Copyright (c) 2011 Wildfire Games 2 2 * 3 3 * Permission is hereby granted, free of charge, to any person obtaining … … 30 30 #include "lib/alignment.h" 31 31 #include "lib/allocators/freelist.h" 32 #include "lib/allocators/allocator_adapters.h" 32 33 33 34 #include "lib/timer.h" 34 35 36 namespace Allocators { 37 38 template<class Storage> 39 struct BasicPoolTest 40 { 41 void operator()() const 42 { 43 Pool<double, Storage> p(100); 44 const size_t initialSpace = p.RemainingObjects(); 45 double* p1 = p.Allocate(); 46 ENSURE(p1 != 0); 47 ENSURE(p.Contains(uintptr_t(p1))); 48 ENSURE(p.RemainingObjects() == initialSpace-1); 49 ENSURE(p.Contains(uintptr_t(p1)+1)); 50 ENSURE(p.Contains(uintptr_t(p1)+sizeof(double)-1)); 51 ENSURE(!p.Contains(uintptr_t(p1)-1)); 52 ENSURE(!p.Contains(uintptr_t(p1)+sizeof(double))); 53 if(p.RemainingObjects() == 0) 54 ENSURE(p.Allocate() == 0); // full 55 else 56 ENSURE(p.Allocate() != 0); // can still expand 57 p.DeallocateAll(); 58 ENSURE(!p.Contains(uintptr_t(p1))); 59 60 p1 = p.Allocate(); 61 ENSURE(p1 != 0); 62 ENSURE(p.Contains(uintptr_t(p1))); 63 ENSURE(p.RemainingObjects() == initialSpace-1); 64 double* p2 = p.Allocate(); 65 ENSURE(p2 != 0); 66 ENSURE(p.Contains(uintptr_t(p2))); 67 ENSURE(p.RemainingObjects() == initialSpace-2); 68 ENSURE(p2 == (double*)(uintptr_t(p1)+sizeof(double))); 69 if(p.RemainingObjects() == 0) 70 ENSURE(p.Allocate() == 0); // full 71 else 72 ENSURE(p.Allocate() != 0); // can still expand 73 } 74 }; 75 76 void TestPool() 77 { 78 ForEachStorage<BasicPoolTest>(); 79 } 80 81 } // namespace Allocators 82 83 35 84 TIMER_ADD_CLIENT(tc_pool_alloc); 36 37 85 38 86 Status pool_create(Pool* p, size_t max_size, size_t el_size) -
ps/trunk/source/lib/allocators/pool.h
r9944 r10051 1 /* Copyright (c) 201 0Wildfire Games1 /* Copyright (c) 2011 Wildfire Games 2 2 * 3 3 * Permission is hereby granted, free of charge, to any person obtaining … … 22 22 23 23 /* 24 * pool allocator 24 * pool allocator (fixed-size blocks, freelist). 25 25 */ 26 26 27 27 #ifndef INCLUDED_ALLOCATORS_POOL 28 28 #define INCLUDED_ALLOCATORS_POOL 29 30 #include "lib/bits.h" // ROUND_UP 31 #include "lib/allocators/allocator_policies.h" 32 33 namespace Allocators { 34 35 /** 36 * allocator design parameters: 37 * - O(1) allocation and deallocation; 38 * - fixed-size objects; 39 * - support for deallocating all objects; 40 * - consecutive allocations are back-to-back; 41 * - objects are aligned to the pointer size. 42 **/ 43 template<typename T, class Storage = Storage_Fixed<> > 44 class Pool 45 { 46 public: 47 // (must round up because freelist stores pointers inside objects) 48 static const size_t objectSize = ROUND_UP(sizeof(T), sizeof(intptr_t)); 49 50 Pool(size_t maxObjects) 51 : storage(maxObjects*objectSize) 52 { 53 DeallocateAll(); 54 } 55 56 size_t RemainingObjects() 57 { 58 return (storage.MaxCapacity() - end) / objectSize; 59 } 60 61 T* Allocate() 62 { 63 void* p = mem_freelist_Detach(freelist); 64 if(p) 65 { 66 ASSERT(Contains(p)); 67 return (T*)p; 68 } 69 70 return (T*)StorageAppend(storage, end, objectSize); 71 } 72 73 void Deallocate(T* p) 74 { 75 ASSERT(Contains(p)); 76 mem_freelist_AddToFront(freelist, p); 77 } 78 79 void DeallocateAll() 80 { 81 freelist = mem_freelist_Sentinel(); 82 end = 0; 83 } 84 85 // @return whether the address lies within the previously allocated range. 
86 bool Contains(uintptr_t address) const 87 { 88 return (address - storage.Address()) < end; 89 } 90 91 private: 92 Storage storage; 93 size_t end; 94 void* freelist; 95 }; 96 97 LIB_API void TestPool(); 98 99 } // namespace Allocators 100 29 101 30 102 #include "lib/allocators/dynarray.h" … … 147 219 148 220 /** 149 * C++ wrapper on top of pool_alloc for fixed-size allocations (determined by sizeof(T))150 *151 * T must be POD (Plain Old Data) because it is memset to 0!152 **/153 template<class T>154 class PoolAllocator155 {156 public:157 explicit PoolAllocator(size_t maxElements)158 {159 (void)pool_create(&m_pool, maxElements*sizeof(T), sizeof(T));160 }161 162 ~PoolAllocator()163 {164 (void)pool_destroy(&m_pool);165 }166 167 T* AllocateZeroedMemory()168 {169 T* t = (T*)pool_alloc(&m_pool, 0);170 if(!t)171 throw std::bad_alloc();172 memset(t, 0, sizeof(T));173 return t;174 }175 176 void Free(T* t)177 {178 pool_free(&m_pool, t);179 }180 181 private:182 Pool m_pool;183 };184 185 /**186 221 * C++ wrapper on top of pool_alloc for variable-sized allocations. 187 222 * Memory is returned uninitialised. -
ps/trunk/source/lib/allocators/tests/test_allocators.h
r10024 r10051 36 36 TS_ASSERT_OK(da_alloc(&da, 1000)); 37 37 TS_ASSERT_OK(da_set_size(&da, 1000)); 38 TS_ASSERT_OK(da_set_prot(&da, PROT_NONE));39 38 TS_ASSERT_OK(da_free(&da)); 40 39 } -
ps/trunk/source/lib/allocators/unique_range.cpp
r9871 r10051 29 29 30 30 31 // NB: callers should skip this if *idxDeleterOut != 0 (avoids the overhead 32 // of an unnecessary indirect function call) 31 33 void RegisterUniqueRangeDeleter(UniqueRangeDeleter deleter, volatile IdxDeleter* idxDeleterOut) 32 34 { … … 45 47 deleters[idxDeleter] = deleter; 46 48 COMPILER_FENCE; 47 *idxDeleterOut = idxDeleter; // linearization point49 *idxDeleterOut = idxDeleter; 48 50 } 49 51 … … 67 69 68 70 static volatile IdxDeleter idxDeleterAligned; 69 if(idxDeleterAligned == 0) 71 if(idxDeleterAligned == 0) // (optional optimization) 70 72 RegisterUniqueRangeDeleter(FreeAligned, &idxDeleterAligned); 71 73 72 74 return RVALUE(UniqueRange(p, size, idxDeleterAligned)); 73 75 } 76 77 78 UniqueRange AllocateVM(size_t size, vm::PageType pageType, int prot) 79 { 80 const UniqueRange::pointer p = vm::Allocate(size, pageType, prot); 81 82 static volatile IdxDeleter idxDeleter; 83 if(idxDeleter == 0) // (optional optimization) 84 RegisterUniqueRangeDeleter(vm::Free, &idxDeleter); 85 86 return RVALUE(UniqueRange(p, size, idxDeleter)); 87 } -
ps/trunk/source/lib/allocators/unique_range.h
r9871 r10051 4 4 #include "lib/lib_api.h" 5 5 #include "lib/alignment.h" // allocationAlignment 6 #include "lib/sysdep/vm.h" 6 7 7 8 // we usually don't hold multiple references to allocations, so unique_ptr … … 192 193 LIB_API UniqueRange AllocateAligned(size_t size, size_t alignment); 193 194 195 LIB_API UniqueRange AllocateVM(size_t size, vm::PageType pageSize = vm::kDefault, int prot = PROT_READ|PROT_WRITE); 196 197 194 198 #endif // #ifndef INCLUDED_ALLOCATORS_UNIQUE_RANGE -
ps/trunk/source/lib/bits.h
r9423 r10051 230 230 } 231 231 232 // evaluates to an expression suitable as an initializer 233 // for constant static data members. 234 #define ROUND_UP(n, multiple) (((n) + (multiple)-1) & ~((multiple)-1)) 235 232 236 233 237 template<typename T> -
ps/trunk/source/lib/debug.cpp
r9875 r10051 34 34 #include "lib/alignment.h" 35 35 #include "lib/app_hooks.h" 36 #include "lib/allocators/page_aligned.h"37 36 #include "lib/fnv_hash.h" 37 #include "lib/sysdep/vm.h" 38 38 #include "lib/sysdep/cpu.h" // cpu_CAS 39 39 #include "lib/sysdep/sysdep.h" … … 208 208 void debug_FreeErrorMessage(ErrorMessageMem* emm) 209 209 { 210 page_aligned_free(emm->pa_mem, messageSize);210 vm::Free(emm->pa_mem, messageSize); 211 211 } 212 212 … … 275 275 276 276 // rationale: see ErrorMessageMem 277 emm->pa_mem = page_aligned_alloc(messageSize);277 emm->pa_mem = vm::Allocate(messageSize); 278 278 wchar_t* const buf = (wchar_t*)emm->pa_mem; 279 279 if(!buf) -
ps/trunk/source/lib/debug.h
r9875 r10051 538 538 // - error messages with stack traces require a good deal of memory 539 539 // (hundreds of KB). static buffers of that size are undesirable. 540 // - the heap may be corrupted, so don't use malloc. allocator.h's541 // page_aligned_malloc (implemented via mmap)should be safe.540 // - the heap may be corrupted, so don't use malloc. 541 // instead, "lib/sysdep/vm.h" functions should be safe. 542 542 // - alloca is a bit iffy (the stack may be maxed out), non-portable and 543 543 // complicates the code because it can't be allocated by a subroutine. -
ps/trunk/source/lib/pch/pch_warnings.h
r9116 r10051 11 11 # pragma warning(disable:4103) // alignment changed after including header (boost has #pragma pack/pop in separate headers) 12 12 # pragma warning(disable:4127) // conditional expression is constant; rationale: see STMT in lib.h. 13 # pragma warning(disable:4324) // structure was padded due to __declspec(align()) 13 14 # pragma warning(disable:4351) // yes, default init of array entries is desired 14 15 # pragma warning(disable:4355) // 'this' used in base member initializer list -
ps/trunk/source/lib/sysdep/os/win/wnuma.cpp
r9580 r10051 28 28 #include "lib/timer.h" 29 29 #include "lib/module_init.h" 30 #include "lib/allocators/page_aligned.h" 30 #include "lib/sysdep/vm.h" 31 #include "lib/sysdep/acpi.h" 31 32 #include "lib/sysdep/os_cpu.h" 32 #include "lib/sysdep/acpi.h"33 33 #include "lib/sysdep/os/win/win.h" 34 34 #include "lib/sysdep/os/win/wutil.h" … … 375 375 { 376 376 const size_t size = 32*MiB; 377 void* mem = page_aligned_alloc(size);377 void* mem = vm::Allocate(size); 378 378 ASSUME_ALIGNED(mem, pageSize); 379 379 … … 396 396 (void)os_cpu_SetThreadAffinityMask(previousProcessorMask); 397 397 398 page_aligned_free(mem, size);398 vm::Free(mem, size); 399 399 400 400 return maxTime / minTime; … … 463 463 464 464 //----------------------------------------------------------------------------- 465 // allocator 466 // 467 //static bool VerifyPages(void* mem, size_t size, size_t pageSize, size_t node) 468 //{ 469 // WUTIL_FUNC(pQueryWorkingSetEx, BOOL, (HANDLE, PVOID, DWORD)); 470 // WUTIL_IMPORT_KERNEL32(QueryWorkingSetEx, pQueryWorkingSetEx); 471 // if(!pQueryWorkingSetEx) 472 // return true; // can't do anything 473 // 474 //#if WINVER >= 0x600 475 // size_t largePageSize = os_cpu_LargePageSize(); 476 // ENSURE(largePageSize != 0); // this value is needed for later 477 // 478 // // retrieve attributes of all pages constituting mem 479 // const size_t numPages = (size + pageSize-1) / pageSize; 480 // PSAPI_WORKING_SET_EX_INFORMATION* wsi = new PSAPI_WORKING_SET_EX_INFORMATION[numPages]; 481 // for(size_t i = 0; i < numPages; i++) 482 // wsi[i].VirtualAddress = (u8*)mem + i*pageSize; 483 // pQueryWorkingSetEx(GetCurrentProcess(), wsi, DWORD(sizeof(PSAPI_WORKING_SET_EX_INFORMATION)*numPages)); 484 // 485 // // ensure each is valid and allocated on the correct node 486 // for(size_t i = 0; i < numPages; i++) 487 // { 488 // const PSAPI_WORKING_SET_EX_BLOCK& attributes = wsi[i].VirtualAttributes; 489 // if(!attributes.Valid) 490 // return false; 491 // if((attributes.LargePage != 0) != 
(pageSize == largePageSize)) 492 // { 493 // debug_printf(L"NUMA: is not a large page\n"); 494 // return false; 495 // } 496 // if(attributes.Node != node) 497 // { 498 // debug_printf(L"NUMA: allocated from remote node\n"); 499 // return false; 500 // } 501 // } 502 // 503 // delete[] wsi; 504 //#else 505 // UNUSED2(mem); 506 // UNUSED2(size); 507 // UNUSED2(pageSize); 508 // UNUSED2(node); 509 //#endif 510 // 511 // return true; 512 //} 513 // 514 // 515 //void* numa_AllocateOnNode(size_t node, size_t size, LargePageDisposition largePageDisposition, size_t* ppageSize) 516 //{ 517 // ENSURE(node < numa_NumNodes()); 518 // 519 // // see if there will be enough memory (non-authoritative, for debug purposes only) 520 // { 521 // const size_t sizeMiB = size/MiB; 522 // const size_t availableMiB = numa_AvailableMemory(node); 523 // if(availableMiB < sizeMiB) 524 // debug_printf(L"NUMA: warning: node reports insufficient memory (%d vs %d MB)\n", availableMiB, sizeMiB); 525 // } 526 // 527 // size_t pageSize; // (used below even if ppageSize is zero) 528 // void* const mem = numa_Allocate(size, largePageDisposition, &pageSize); 529 // if(ppageSize) 530 // *ppageSize = pageSize; 531 // 532 // // we can't use VirtualAllocExNuma - it's only available in Vista and Server 2008. 533 // // workaround: fault in all pages now to ensure they are allocated from the 534 // // current node, then verify page attributes. 
535 // const uintptr_t previousProcessorMask = os_cpu_SetThreadAffinityMask(numa_ProcessorMaskFromNode(node)); 536 // memset(mem, 0, size); 537 // (void)os_cpu_SetThreadAffinityMask(previousProcessorMask); 538 // 539 // VerifyPages(mem, size, pageSize, node); 540 // 541 // return mem; 542 //} 465 466 #if 0 467 468 static bool VerifyPages(void* mem, size_t size, size_t pageSize, size_t node) 469 { 470 WUTIL_FUNC(pQueryWorkingSetEx, BOOL, (HANDLE, PVOID, DWORD)); 471 WUTIL_IMPORT_KERNEL32(QueryWorkingSetEx, pQueryWorkingSetEx); 472 if(!pQueryWorkingSetEx) 473 return true; // can't do anything 474 475 #if WINVER >= 0x600 476 size_t largePageSize = os_cpu_LargePageSize(); 477 ENSURE(largePageSize != 0); // this value is needed for later 478 479 // retrieve attributes of all pages constituting mem 480 const size_t numPages = (size + pageSize-1) / pageSize; 481 PSAPI_WORKING_SET_EX_INFORMATION* wsi = new PSAPI_WORKING_SET_EX_INFORMATION[numPages]; 482 for(size_t i = 0; i < numPages; i++) 483 wsi[i].VirtualAddress = (u8*)mem + i*pageSize; 484 pQueryWorkingSetEx(GetCurrentProcess(), wsi, DWORD(sizeof(PSAPI_WORKING_SET_EX_INFORMATION)*numPages)); 485 486 // ensure each is valid and allocated on the correct node 487 for(size_t i = 0; i < numPages; i++) 488 { 489 const PSAPI_WORKING_SET_EX_BLOCK& attributes = wsi[i].VirtualAttributes; 490 if(!attributes.Valid) 491 return false; 492 if((attributes.LargePage != 0) != (pageSize == largePageSize)) 493 { 494 debug_printf(L"NUMA: is not a large page\n"); 495 return false; 496 } 497 if(attributes.Node != node) 498 { 499 debug_printf(L"NUMA: allocated from remote node\n"); 500 return false; 501 } 502 } 503 504 delete[] wsi; 505 #else 506 UNUSED2(mem); 507 UNUSED2(size); 508 UNUSED2(pageSize); 509 UNUSED2(node); 510 #endif 511 512 return true; 513 } 514 515 #endif -
ps/trunk/source/lib/sysdep/os/win/wposix/wmman.cpp
r9871 r10051 1 /* Copyright (c) 201 0Wildfire Games1 /* Copyright (c) 2011 Wildfire Games 2 2 * 3 3 * Permission is hereby granted, free of charge, to any person obtaining … … 28 28 29 29 30 //----------------------------------------------------------------------------- 31 // memory mapping 32 //----------------------------------------------------------------------------- 33 34 // convert POSIX PROT_* flags to their Win32 PAGE_* enumeration equivalents. 35 // used by mprotect. 36 static DWORD win32_prot(int prot) 37 { 30 unsigned MemoryProtectionFromPosix(int prot) 31 { 32 if(prot == PROT_NONE) 33 return PAGE_NOACCESS; 34 38 35 // this covers all 8 combinations of read|write|exec 39 // (note that "none" means all flags are 0).40 36 switch(prot & (PROT_READ|PROT_WRITE|PROT_EXEC)) 41 37 { 42 case PROT_NONE:43 return PAGE_NOACCESS;44 38 case PROT_READ: 45 39 return PAGE_READONLY; … … 58 52 case PROT_READ|PROT_WRITE|PROT_EXEC: 59 53 return PAGE_EXECUTE_READWRITE; 60 } 61 62 return 0; // UNREACHABLE 63 } 64 54 default: // none set 55 DEBUG_WARN_ERR(ERR::INVALID_FLAG); 56 return PAGE_NOACCESS; 57 } 58 59 // UNREACHABLE 60 } 61 62 63 //----------------------------------------------------------------------------- 64 // memory mapping 65 //----------------------------------------------------------------------------- 65 66 66 67 int mprotect(void* addr, size_t len, int prot) 67 68 { 68 const DWORD newProtect = win32_prot(prot);69 const DWORD newProtect = (DWORD)MemoryProtectionFromPosix(prot); 69 70 DWORD oldProtect; // required by VirtualProtect 70 71 const BOOL ok = VirtualProtect(addr, len, newProtect, &oldProtect); … … 105 106 106 107 const DWORD allocationType = want_commit? MEM_COMMIT : MEM_RESERVE; 107 const DWORD protect = win32_prot(prot);108 const DWORD protect = (DWORD)MemoryProtectionFromPosix(prot); 108 109 void* p = VirtualAlloc(start, len, allocationType, protect); 109 110 if(!p) -
ps/trunk/source/lib/sysdep/os/win/wposix/wmman.h
r9871 r10051 1 /* Copyright (c) 201 0Wildfire Games1 /* Copyright (c) 2011 Wildfire Games 2 2 * 3 3 * Permission is hereby granted, free of charge, to any person obtaining … … 57 57 extern int mprotect(void* addr, size_t len, int prot); 58 58 59 // convert POSIX PROT_* flags to their Win32 PAGE_* enumeration equivalents. 60 LIB_API unsigned MemoryProtectionFromPosix(int prot); 61 59 62 #endif // #ifndef INCLUDED_WMMAN
Note: See TracChangeset for help on using the changeset viewer.
