
Changeset 10051 for ps


Timestamp: 08/21/11 13:00:09 (13 years ago)
Author: Jan Wassenberg
Message:

lay groundwork for more efficient and flexible allocators. add new sysdep/vm that provides access to additional features on Windows (large pages, autocommit). add Pool/Arena allocators that avoid overhead and support arbitrary storage (not just the expensive virtual memory allocator in DynArray)

Location: ps/trunk/source/lib
Files: 6 added, 16 edited
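
The changes below replace the old page_aligned_alloc/mem_* calls with the new sysdep/vm interface, which separates reserving address space from committing physical pages (vm::ReserveAddressSpace, vm::Commit, vm::Decommit, vm::ReleaseAddressSpace) and offers vm::Allocate/vm::Free for one-shot allocations. As a rough sketch only — the wrapper function, sizes, growth pattern and error handling here are illustrative and not taken from this changeset — a caller might use the reserve/commit split like this:

    #include "lib/sysdep/vm.h"

    static Status UseReserveCommit()
    {
        // reserve a large range up front, but only pay for pages as they are needed
        const size_t maxSize = 64*MiB;
        u8* base = (u8*)vm::ReserveAddressSpace(maxSize);  // address space only, no physical memory yet
        if(!base)
            return ERR::NO_MEM;

        const size_t committed = 16*KiB;
        if(!vm::Commit(uintptr_t(base), committed))        // back the first chunk with actual pages
            return ERR::NO_MEM;

        // ... use [base, base+committed) ...

        vm::Decommit(uintptr_t(base), committed);          // return the physical pages
        vm::ReleaseAddressSpace(base, maxSize);            // drop the reservation
        return INFO::OK;
    }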

Legend:
   unmodified
-  removed
+  added
  • ps/trunk/source/lib/alignment.h

r9875 → r10051
  //
  
- static const size_t allocationAlignment = ARCH_AMD64? 16 : 8;
+ static const size_t allocationAlignment = 16;
  
  static const size_t KiB = size_t(1) << 10;
  • ps/trunk/source/lib/allocators/dynarray.cpp

r10024 → r10051
  
  #include "lib/alignment.h"
- #include "lib/allocators/page_aligned.h"
+ #include "lib/sysdep/vm.h"
  
  
…
      const size_t cur_size    = da->cur_size;
      const size_t pos         = da->pos;
-     const int prot           = da->prot;
  
      // note: this happens if max_size == 0
…
      if(pos > cur_size || pos > max_size_pa)
          WARN_RETURN(ERR::_5);
-     if(prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC))
-         WARN_RETURN(ERR::_6);
  
      return INFO::OK;
…
  Status da_alloc(DynArray* da, size_t max_size)
  {
+     ENSURE(max_size != 0);
      const size_t max_size_pa = Align<pageSize>(max_size);
  
-     u8* p = 0;
-     if(max_size_pa) // (avoid mmap failure)
-         RETURN_STATUS_IF_ERR(mem_Reserve(max_size_pa, &p));
+     u8* p = (u8*)vm::ReserveAddressSpace(max_size_pa);
+     if(!p)
+         return ERR::NO_MEM; // NOWARN (already done in vm)
  
      da->base        = p;
…
      da->cur_size    = 0;
      da->cur_size_pa = 0;
-     da->prot        = PROT_READ|PROT_WRITE;
      da->pos         = 0;
      CHECK_DA(da);
…
      CHECK_DA(da);
  
-     u8* p            = da->base;
-     size_t size_pa   = da->max_size_pa;
+     vm::ReleaseAddressSpace(da->base, da->max_size_pa);
  
      // wipe out the DynArray for safety
-     // (must be done here because mem_Release may fail)
      memset(da, 0, sizeof(*da));
  
-     if(size_pa)
-         RETURN_STATUS_IF_ERR(mem_Release(p, size_pa));
      return INFO::OK;
  }
…
  
      u8* end = da->base + cur_size_pa;
+     bool ok = true;
      // expanding
      if(size_delta_pa > 0)
-         RETURN_STATUS_IF_ERR(mem_Commit(end, size_delta_pa, da->prot));
+         ok = vm::Commit(uintptr_t(end), size_delta_pa);
      // shrinking
      else if(size_delta_pa < 0)
-         RETURN_STATUS_IF_ERR(mem_Decommit(end+size_delta_pa, -size_delta_pa));
+         ok = vm::Decommit(uintptr_t(end+size_delta_pa), -size_delta_pa);
      // else: no change in page count, e.g. if going from size=1 to 2
      // (we don't want mem_* to have to handle size=0)
…
      da->cur_size_pa = new_size_pa;
      CHECK_DA(da);
-     return INFO::OK;
+     return ok? INFO::OK : ERR::FAIL;
  }
  
…
  
  
- Status da_set_prot(DynArray* da, int prot)
- {
-     CHECK_DA(da);
- 
-     da->prot = prot;
-     RETURN_STATUS_IF_ERR(mem_Protect(da->base, da->cur_size_pa, prot));
- 
-     CHECK_DA(da);
-     return INFO::OK;
- }
- 
- 
  Status da_append(DynArray* da, const void* data, size_t size)
  {
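
For orientation, this is the DynArray round trip that the updated test in test_allocators.h (further down) still exercises; the wrapper function and the size of 1000 bytes are illustrative:

    #include "lib/allocators/dynarray.h"

    static Status UseDynArray()
    {
        DynArray da;
        RETURN_STATUS_IF_ERR(da_alloc(&da, 1000));     // reserves address space via vm::ReserveAddressSpace
        RETURN_STATUS_IF_ERR(da_set_size(&da, 1000));  // commits pages via vm::Commit
        // (da_set_prot is gone; committed pages are now always PROT_READ|PROT_WRITE)
        RETURN_STATUS_IF_ERR(da_free(&da));            // releases the whole reservation via vm::ReleaseAddressSpace
        return INFO::OK;
    }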
  • ps/trunk/source/lib/allocators/dynarray.h

r10024 → r10051
      size_t cur_size;     /// committed
      size_t cur_size_pa;
- 
-     /**
-      * mprotect flags applied to newly committed pages
-      **/
-     int prot;
  
      size_t pos;
…
  
  /**
-  * change access rights of the array memory.
-  *
-  * used to implement write-protection. affects the currently committed
-  * pages as well as all subsequently added pages.
-  *
-  * @param da DynArray.
-  * @param prot a combination of the PROT_* values used with mprotect.
-  * @return Status.
-  **/
- LIB_API Status da_set_prot(DynArray* da, int prot);
- 
- /**
   * "write" to array, i.e. copy from the given buffer.
   *
  • ps/trunk/source/lib/allocators/overrun_protector.h

r9361 → r10051
  
  #include "lib/config2.h"    // CONFIG2_ALLOCATORS_OVERRUN_PROTECTION
- #include "lib/allocators/page_aligned.h"
+ #include "lib/sysdep/vm.h"
  
  /**
…
  public:
      OverrunProtector()
-         : object(new(page_aligned_alloc(sizeof(T))) T())
+         : object(new(vm::Allocate(sizeof(T))) T())
      {
          lock();
…
          unlock();
          object->~T();   // call dtor (since we used placement new)
-         page_aligned_free(object, sizeof(T));
+         vm::Free(object, sizeof(T));
      }
  
…
      {
  #if CONFIG2_ALLOCATORS_OVERRUN_PROTECTION
-         mprotect(object, sizeof(T), PROT_NONE);
+         vm::Protect(object, sizeof(T), PROT_NONE);
  #endif
      }
…
      {
  #if CONFIG2_ALLOCATORS_OVERRUN_PROTECTION
-         mprotect(object, sizeof(T), PROT_READ|PROT_WRITE);
+         vm::Protect(object, sizeof(T), PROT_READ|PROT_WRITE);
  #endif
      }
  • ps/trunk/source/lib/allocators/pool.cpp

r9961 → r10051
- /* Copyright (c) 2010 Wildfire Games
+ /* Copyright (c) 2011 Wildfire Games
   *
   * Permission is hereby granted, free of charge, to any person obtaining
…
  #include "lib/alignment.h"
  #include "lib/allocators/freelist.h"
+ #include "lib/allocators/allocator_adapters.h"
  
  #include "lib/timer.h"
  
+ namespace Allocators {
+ 
+ template<class Storage>
+ struct BasicPoolTest
+ {
+     void operator()() const
+     {
+         Pool<double, Storage> p(100);
+         const size_t initialSpace = p.RemainingObjects();
+         double* p1 = p.Allocate();
+         ENSURE(p1 != 0);
+         ENSURE(p.Contains(uintptr_t(p1)));
+         ENSURE(p.RemainingObjects() == initialSpace-1);
+         ENSURE(p.Contains(uintptr_t(p1)+1));
+         ENSURE(p.Contains(uintptr_t(p1)+sizeof(double)-1));
+         ENSURE(!p.Contains(uintptr_t(p1)-1));
+         ENSURE(!p.Contains(uintptr_t(p1)+sizeof(double)));
+         if(p.RemainingObjects() == 0)
+             ENSURE(p.Allocate() == 0);  // full
+         else
+             ENSURE(p.Allocate() != 0);  // can still expand
+         p.DeallocateAll();
+         ENSURE(!p.Contains(uintptr_t(p1)));
+ 
+         p1 = p.Allocate();
+         ENSURE(p1 != 0);
+         ENSURE(p.Contains(uintptr_t(p1)));
+         ENSURE(p.RemainingObjects() == initialSpace-1);
+         double* p2 = p.Allocate();
+         ENSURE(p2 != 0);
+         ENSURE(p.Contains(uintptr_t(p2)));
+         ENSURE(p.RemainingObjects() == initialSpace-2);
+         ENSURE(p2 == (double*)(uintptr_t(p1)+sizeof(double)));
+         if(p.RemainingObjects() == 0)
+             ENSURE(p.Allocate() == 0);  // full
+         else
+             ENSURE(p.Allocate() != 0);  // can still expand
+     }
+ };
+ 
+ void TestPool()
+ {
+     ForEachStorage<BasicPoolTest>();
+ }
+ 
+ }   // namespace Allocators
+ 
+ 
  TIMER_ADD_CLIENT(tc_pool_alloc);
- 
  
  Status pool_create(Pool* p, size_t max_size, size_t el_size)
  • ps/trunk/source/lib/allocators/pool.h

r9944 → r10051
- /* Copyright (c) 2010 Wildfire Games
+ /* Copyright (c) 2011 Wildfire Games
   *
   * Permission is hereby granted, free of charge, to any person obtaining
…
  
  /*
-  * pool allocator
+  * pool allocator (fixed-size blocks, freelist).
   */
  
  #ifndef INCLUDED_ALLOCATORS_POOL
  #define INCLUDED_ALLOCATORS_POOL
+ 
+ #include "lib/bits.h"   // ROUND_UP
+ #include "lib/allocators/allocator_policies.h"
+ 
+ namespace Allocators {
+ 
+ /**
+  * allocator design parameters:
+  * - O(1) allocation and deallocation;
+  * - fixed-size objects;
+  * - support for deallocating all objects;
+  * - consecutive allocations are back-to-back;
+  * - objects are aligned to the pointer size.
+  **/
+ template<typename T, class Storage = Storage_Fixed<> >
+ class Pool
+ {
+ public:
+     // (must round up because freelist stores pointers inside objects)
+     static const size_t objectSize = ROUND_UP(sizeof(T), sizeof(intptr_t));
+ 
+     Pool(size_t maxObjects)
+         : storage(maxObjects*objectSize)
+     {
+         DeallocateAll();
+     }
+ 
+     size_t RemainingObjects()
+     {
+         return (storage.MaxCapacity() - end) / objectSize;
+     }
+ 
+     T* Allocate()
+     {
+         void* p = mem_freelist_Detach(freelist);
+         if(p)
+         {
+             ASSERT(Contains(p));
+             return (T*)p;
+         }
+ 
+         return (T*)StorageAppend(storage, end, objectSize);
+     }
+ 
+     void Deallocate(T* p)
+     {
+         ASSERT(Contains(p));
+         mem_freelist_AddToFront(freelist, p);
+     }
+ 
+     void DeallocateAll()
+     {
+         freelist = mem_freelist_Sentinel();
+         end = 0;
+     }
+ 
+     // @return whether the address lies within the previously allocated range.
+     bool Contains(uintptr_t address) const
+     {
+         return (address - storage.Address()) < end;
+     }
+ 
+ private:
+     Storage storage;
+     size_t end;
+     void* freelist;
+ };
+ 
+ LIB_API void TestPool();
+ 
+ }   // namespace Allocators
+ 
  
  #include "lib/allocators/dynarray.h"
…
  
  /**
-  * C++ wrapper on top of pool_alloc for fixed-size allocations (determined by sizeof(T))
-  *
-  * T must be POD (Plain Old Data) because it is memset to 0!
-  **/
- template<class T>
- class PoolAllocator
- {
- public:
-     explicit PoolAllocator(size_t maxElements)
-     {
-         (void)pool_create(&m_pool, maxElements*sizeof(T), sizeof(T));
-     }
- 
-     ~PoolAllocator()
-     {
-         (void)pool_destroy(&m_pool);
-     }
- 
-     T* AllocateZeroedMemory()
-     {
-         T* t = (T*)pool_alloc(&m_pool, 0);
-         if(!t)
-             throw std::bad_alloc();
-         memset(t, 0, sizeof(T));
-         return t;
-     }
- 
-     void Free(T* t)
-     {
-         pool_free(&m_pool, t);
-     }
- 
- private:
-     Pool m_pool;
- };
- 
- /**
   * C++ wrapper on top of pool_alloc for variable-sized allocations.
   * Memory is returned uninitialised.
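
The removed PoolAllocator wrapper is superseded by the new Allocators::Pool above. A minimal sketch of its use, following the pattern of BasicPoolTest in pool.cpp (the element type, capacity and wrapper function are arbitrary choices here, and the default Storage policy is used):

    #include "lib/allocators/pool.h"

    static void UsePool()
    {
        Allocators::Pool<double> pool(100);            // room for up to 100 doubles
        double* d = pool.Allocate();                   // O(1); returns 0 once storage is exhausted
        ENSURE(d != 0 && pool.Contains(uintptr_t(d)));
        pool.Deallocate(d);                            // puts the object back on the freelist
        pool.DeallocateAll();                          // resets freelist and bump pointer in one go
    }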
  • ps/trunk/source/lib/allocators/tests/test_allocators.h

r10024 → r10051
          TS_ASSERT_OK(da_alloc(&da, 1000));
          TS_ASSERT_OK(da_set_size(&da, 1000));
-         TS_ASSERT_OK(da_set_prot(&da, PROT_NONE));
          TS_ASSERT_OK(da_free(&da));
      }
  • ps/trunk/source/lib/allocators/unique_range.cpp

r9871 → r10051
  
  
+ // NB: callers should skip this if *idxDeleterOut != 0 (avoids the overhead
+ // of an unnecessary indirect function call)
  void RegisterUniqueRangeDeleter(UniqueRangeDeleter deleter, volatile IdxDeleter* idxDeleterOut)
  {
…
      deleters[idxDeleter] = deleter;
      COMPILER_FENCE;
-     *idxDeleterOut = idxDeleter;    // linearization point
+     *idxDeleterOut = idxDeleter;
  }
  
…
  
      static volatile IdxDeleter idxDeleterAligned;
-     if(idxDeleterAligned == 0)
+     if(idxDeleterAligned == 0)  // (optional optimization)
          RegisterUniqueRangeDeleter(FreeAligned, &idxDeleterAligned);
  
      return RVALUE(UniqueRange(p, size, idxDeleterAligned));
  }
+ 
+ 
+ UniqueRange AllocateVM(size_t size, vm::PageType pageType, int prot)
+ {
+     const UniqueRange::pointer p = vm::Allocate(size, pageType, prot);
+ 
+     static volatile IdxDeleter idxDeleter;
+     if(idxDeleter == 0) // (optional optimization)
+         RegisterUniqueRangeDeleter(vm::Free, &idxDeleter);
+ 
+     return RVALUE(UniqueRange(p, size, idxDeleter));
+ }
  • ps/trunk/source/lib/allocators/unique_range.h

r9871 → r10051
  #include "lib/lib_api.h"
  #include "lib/alignment.h"  // allocationAlignment
+ #include "lib/sysdep/vm.h"
  
  // we usually don't hold multiple references to allocations, so unique_ptr
…
  LIB_API UniqueRange AllocateAligned(size_t size, size_t alignment);
  
+ LIB_API UniqueRange AllocateVM(size_t size, vm::PageType pageSize = vm::kDefault, int prot = PROT_READ|PROT_WRITE);
+ 
+ 
  #endif  // #ifndef INCLUDED_ALLOCATORS_UNIQUE_RANGE
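
A short sketch of the new AllocateVM in use; with the defaults shown above, the range is readable/writable and vm::Free is registered as its deleter. The wrapper function and the size are illustrative only:

    #include "lib/allocators/unique_range.h"

    static void UseVMRange()
    {
        UniqueRange mem = AllocateVM(256*KiB);  // vm::kDefault pages, PROT_READ|PROT_WRITE
        // ... use the range; vm::Free runs automatically when mem goes out of scope ...
    }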
  • ps/trunk/source/lib/bits.h

r9423 → r10051
  }
  
+ // evaluates to an expression suitable as an initializer
+ // for constant static data members.
+ #define ROUND_UP(n, multiple) (((n) + (multiple)-1) & ~((multiple)-1))
+ 
  
  template<typename T>
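
ROUND_UP is a macro so that, per the comment above, it can appear in initializers of constant static data members (as in Pool::objectSize); note that the bit trick only works when multiple is a power of two. For example (the names here are illustrative):

    // (13 + 8-1) & ~(8-1)  ==  20 & ~7  ==  16
    static const size_t padded     = ROUND_UP(13, 8);
    // as used by Pool: pad each object so a freelist pointer fits inside it
    static const size_t objectSize = ROUND_UP(sizeof(double), sizeof(intptr_t));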
  • ps/trunk/source/lib/debug.cpp

r9875 → r10051
  #include "lib/alignment.h"
  #include "lib/app_hooks.h"
- #include "lib/allocators/page_aligned.h"
  #include "lib/fnv_hash.h"
+ #include "lib/sysdep/vm.h"
  #include "lib/sysdep/cpu.h" // cpu_CAS
  #include "lib/sysdep/sysdep.h"
…
  void debug_FreeErrorMessage(ErrorMessageMem* emm)
  {
-     page_aligned_free(emm->pa_mem, messageSize);
+     vm::Free(emm->pa_mem, messageSize);
  }
  
…
  
      // rationale: see ErrorMessageMem
-     emm->pa_mem = page_aligned_alloc(messageSize);
+     emm->pa_mem = vm::Allocate(messageSize);
      wchar_t* const buf = (wchar_t*)emm->pa_mem;
      if(!buf)
  • ps/trunk/source/lib/debug.h

r9875 → r10051
      // - error messages with stack traces require a good deal of memory
      //   (hundreds of KB). static buffers of that size are undesirable.
-     // - the heap may be corrupted, so don't use malloc. allocator.h's
-     //   page_aligned_malloc (implemented via mmap) should be safe.
+     // - the heap may be corrupted, so don't use malloc.
+     //   instead, "lib/sysdep/vm.h" functions should be safe.
      // - alloca is a bit iffy (the stack may be maxed out), non-portable and
      //   complicates the code because it can't be allocated by a subroutine.
  • ps/trunk/source/lib/pch/pch_warnings.h

r9116 → r10051
  # pragma warning(disable:4103)  // alignment changed after including header (boost has #pragma pack/pop in separate headers)
  # pragma warning(disable:4127)  // conditional expression is constant; rationale: see STMT in lib.h.
+ # pragma warning(disable:4324)  // structure was padded due to __declspec(align())
  # pragma warning(disable:4351)  // yes, default init of array entries is desired
  # pragma warning(disable:4355)  // 'this' used in base member initializer list
  • ps/trunk/source/lib/sysdep/os/win/wnuma.cpp

r9580 → r10051
  #include "lib/timer.h"
  #include "lib/module_init.h"
- #include "lib/allocators/page_aligned.h"
+ #include "lib/sysdep/vm.h"
+ #include "lib/sysdep/acpi.h"
  #include "lib/sysdep/os_cpu.h"
- #include "lib/sysdep/acpi.h"
  #include "lib/sysdep/os/win/win.h"
  #include "lib/sysdep/os/win/wutil.h"
…
  {
      const size_t size = 32*MiB;
-     void* mem = page_aligned_alloc(size);
+     void* mem = vm::Allocate(size);
      ASSUME_ALIGNED(mem, pageSize);
  
…
      (void)os_cpu_SetThreadAffinityMask(previousProcessorMask);
  
-     page_aligned_free(mem, size);
+     vm::Free(mem, size);
  
      return maxTime / minTime;
…
  
  //-----------------------------------------------------------------------------
- // allocator
- //
- //static bool VerifyPages(void* mem, size_t size, size_t pageSize, size_t node)
- //{
- //  WUTIL_FUNC(pQueryWorkingSetEx, BOOL, (HANDLE, PVOID, DWORD));
- //  WUTIL_IMPORT_KERNEL32(QueryWorkingSetEx, pQueryWorkingSetEx);
- //  if(!pQueryWorkingSetEx)
- //      return true;    // can't do anything
- //
- //#if WINVER >= 0x600
- //  size_t largePageSize = os_cpu_LargePageSize();
- //  ENSURE(largePageSize != 0); // this value is needed for later
- //
- //  // retrieve attributes of all pages constituting mem
- //  const size_t numPages = (size + pageSize-1) / pageSize;
- //  PSAPI_WORKING_SET_EX_INFORMATION* wsi = new PSAPI_WORKING_SET_EX_INFORMATION[numPages];
- //  for(size_t i = 0; i < numPages; i++)
- //      wsi[i].VirtualAddress = (u8*)mem + i*pageSize;
- //  pQueryWorkingSetEx(GetCurrentProcess(), wsi, DWORD(sizeof(PSAPI_WORKING_SET_EX_INFORMATION)*numPages));
- //
- //  // ensure each is valid and allocated on the correct node
- //  for(size_t i = 0; i < numPages; i++)
- //  {
- //      const PSAPI_WORKING_SET_EX_BLOCK& attributes = wsi[i].VirtualAttributes;
- //      if(!attributes.Valid)
- //          return false;
- //      if((attributes.LargePage != 0) != (pageSize == largePageSize))
- //      {
- //          debug_printf(L"NUMA: is not a large page\n");
- //          return false;
- //      }
- //      if(attributes.Node != node)
- //      {
- //          debug_printf(L"NUMA: allocated from remote node\n");
- //          return false;
- //      }
- //  }
- //
- //  delete[] wsi;
- //#else
- //  UNUSED2(mem);
- //  UNUSED2(size);
- //  UNUSED2(pageSize);
- //  UNUSED2(node);
- //#endif
- //
- //  return true;
- //}
- //
- //
- //void* numa_AllocateOnNode(size_t node, size_t size, LargePageDisposition largePageDisposition, size_t* ppageSize)
- //{
- //  ENSURE(node < numa_NumNodes());
- //
- //  // see if there will be enough memory (non-authoritative, for debug purposes only)
- //  {
- //      const size_t sizeMiB = size/MiB;
- //      const size_t availableMiB = numa_AvailableMemory(node);
- //      if(availableMiB < sizeMiB)
- //          debug_printf(L"NUMA: warning: node reports insufficient memory (%d vs %d MB)\n", availableMiB, sizeMiB);
- //  }
- //
- //  size_t pageSize;    // (used below even if ppageSize is zero)
- //  void* const mem = numa_Allocate(size, largePageDisposition, &pageSize);
- //  if(ppageSize)
- //      *ppageSize = pageSize;
- //
- //  // we can't use VirtualAllocExNuma - it's only available in Vista and Server 2008.
- //  // workaround: fault in all pages now to ensure they are allocated from the
- //  // current node, then verify page attributes.
- //  const uintptr_t previousProcessorMask = os_cpu_SetThreadAffinityMask(numa_ProcessorMaskFromNode(node));
- //  memset(mem, 0, size);
- //  (void)os_cpu_SetThreadAffinityMask(previousProcessorMask);
- //
- //  VerifyPages(mem, size, pageSize, node);
- //
- //  return mem;
- //}
+ 
+ #if 0
+ 
+ static bool VerifyPages(void* mem, size_t size, size_t pageSize, size_t node)
+ {
+     WUTIL_FUNC(pQueryWorkingSetEx, BOOL, (HANDLE, PVOID, DWORD));
+     WUTIL_IMPORT_KERNEL32(QueryWorkingSetEx, pQueryWorkingSetEx);
+     if(!pQueryWorkingSetEx)
+         return true;    // can't do anything
+ 
+ #if WINVER >= 0x600
+     size_t largePageSize = os_cpu_LargePageSize();
+     ENSURE(largePageSize != 0); // this value is needed for later
+ 
+     // retrieve attributes of all pages constituting mem
+     const size_t numPages = (size + pageSize-1) / pageSize;
+     PSAPI_WORKING_SET_EX_INFORMATION* wsi = new PSAPI_WORKING_SET_EX_INFORMATION[numPages];
+     for(size_t i = 0; i < numPages; i++)
+         wsi[i].VirtualAddress = (u8*)mem + i*pageSize;
+     pQueryWorkingSetEx(GetCurrentProcess(), wsi, DWORD(sizeof(PSAPI_WORKING_SET_EX_INFORMATION)*numPages));
+ 
+     // ensure each is valid and allocated on the correct node
+     for(size_t i = 0; i < numPages; i++)
+     {
+         const PSAPI_WORKING_SET_EX_BLOCK& attributes = wsi[i].VirtualAttributes;
+         if(!attributes.Valid)
+             return false;
+         if((attributes.LargePage != 0) != (pageSize == largePageSize))
+         {
+             debug_printf(L"NUMA: is not a large page\n");
+             return false;
+         }
+         if(attributes.Node != node)
+         {
+             debug_printf(L"NUMA: allocated from remote node\n");
+             return false;
+         }
+     }
+ 
+     delete[] wsi;
+ #else
+     UNUSED2(mem);
+     UNUSED2(size);
+     UNUSED2(pageSize);
+     UNUSED2(node);
+ #endif
+ 
+     return true;
+ }
+ 
+ #endif
  • ps/trunk/source/lib/sysdep/os/win/wposix/wmman.cpp

r9871 → r10051
- /* Copyright (c) 2010 Wildfire Games
+ /* Copyright (c) 2011 Wildfire Games
   *
   * Permission is hereby granted, free of charge, to any person obtaining
…
  
  
- //-----------------------------------------------------------------------------
- // memory mapping
- //-----------------------------------------------------------------------------
- 
- // convert POSIX PROT_* flags to their Win32 PAGE_* enumeration equivalents.
- // used by mprotect.
- static DWORD win32_prot(int prot)
- {
+ unsigned MemoryProtectionFromPosix(int prot)
+ {
+     if(prot == PROT_NONE)
+         return PAGE_NOACCESS;
+ 
      // this covers all 8 combinations of read|write|exec
-     // (note that "none" means all flags are 0).
      switch(prot & (PROT_READ|PROT_WRITE|PROT_EXEC))
      {
-     case PROT_NONE:
-         return PAGE_NOACCESS;
      case PROT_READ:
          return PAGE_READONLY;
…
      case PROT_READ|PROT_WRITE|PROT_EXEC:
          return PAGE_EXECUTE_READWRITE;
-     }
- 
-     return 0;   // UNREACHABLE
- }
- 
+     default:    // none set
+         DEBUG_WARN_ERR(ERR::INVALID_FLAG);
+         return PAGE_NOACCESS;
+     }
+ 
+     // UNREACHABLE
+ }
+ 
+ 
+ //-----------------------------------------------------------------------------
+ // memory mapping
+ //-----------------------------------------------------------------------------
  
  int mprotect(void* addr, size_t len, int prot)
  {
-     const DWORD newProtect = win32_prot(prot);
+     const DWORD newProtect = (DWORD)MemoryProtectionFromPosix(prot);
      DWORD oldProtect;   // required by VirtualProtect
      const BOOL ok = VirtualProtect(addr, len, newProtect, &oldProtect);
…
  
      const DWORD allocationType = want_commit? MEM_COMMIT : MEM_RESERVE;
-     const DWORD protect = win32_prot(prot);
+     const DWORD protect = (DWORD)MemoryProtectionFromPosix(prot);
      void* p = VirtualAlloc(start, len, allocationType, protect);
      if(!p)
  • ps/trunk/source/lib/sysdep/os/win/wposix/wmman.h

r9871 → r10051
- /* Copyright (c) 2010 Wildfire Games
+ /* Copyright (c) 2011 Wildfire Games
   *
   * Permission is hereby granted, free of charge, to any person obtaining
…
  extern int mprotect(void* addr, size_t len, int prot);
  
+ // convert POSIX PROT_* flags to their Win32 PAGE_* enumeration equivalents.
+ LIB_API unsigned MemoryProtectionFromPosix(int prot);
+ 
  #endif  // #ifndef INCLUDED_WMMAN
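
For reference, the mappings performed by MemoryProtectionFromPosix for the cases visible in the wmman.cpp hunk above; the intermediate read/write/exec combinations are handled by the elided part of the switch. The wrapper function is illustrative, and the PAGE_* constants assume the usual Win32 headers are already included (as via the project's precompiled header):

    #include "lib/sysdep/os/win/wposix/wmman.h"

    static void CheckProtFlagMapping()
    {
        ENSURE(MemoryProtectionFromPosix(PROT_NONE) == PAGE_NOACCESS);
        ENSURE(MemoryProtectionFromPosix(PROT_READ) == PAGE_READONLY);
        ENSURE(MemoryProtectionFromPosix(PROT_READ|PROT_WRITE|PROT_EXEC) == PAGE_EXECUTE_READWRITE);
    }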