Timestamp: 06/03/04 02:17:24
Location:  ps/trunk/source/lib/res
Files:     6 edited
Legend:
  ' '  unmodified
  '+'  added
  '-'  removed
ps/trunk/source/lib/res/file.cpp
--- r371
+++ r373

@@ -108 +108 @@
 //
 // can only be called once, by design (see below). rel_path is trusted.
-int file_rel_chdir(const char* argv0, const char* rel_path)
+int file_rel_chdir(const char* argv0, const char* const rel_path)
 {
     const char* msg = 0;

@@ -300 +300 @@


-int file_stat(const char* path, struct stat* s)
+int file_stat(const char* const path, struct stat* const s)
 {
     char n_path[PATH_MAX+1];

@@ -362 +362 @@
     else if(f->fd < 0)
         msg = "File fd invalid (< 0)";
+    else if((f->mapping != 0) ^ (f->map_refs != 0))
+        msg = "File mapping without refs";
 #ifndef NDEBUG
     else if(!f->fn_hash)

@@ -388 +390 @@


-int file_open(const char* p_fn, uint flags, File* f)
+int file_open(const char* const p_fn, const uint flags, File* const f)
 {
     memset(f, 0, sizeof(File));

@@ -424 +426 @@
 #endif

-    f->flags = flags;
-    f->size = size;
-    f->fn_hash = fnv_hash(n_fn);    // copy filename insteaD?
-    f->mapping = 0;
-    f->fd = fd;
+    f->flags = flags;
+    f->size = size;
+    f->fn_hash = fnv_hash(n_fn);    // copy filename instead?
+    f->mapping = 0;
+    f->map_refs = 0;
+    f->fd = fd;
 }

 invalid_f:
-    CHECK_FILE(f)
+    CHECK_FILE(f);

     return 0;

@@ -438 +441 @@


-int file_close(File* f)
+int file_close(File* const f)
 {
     CHECK_FILE(f);

@@ -468 +471 @@


-int ll_start_io(File* f, off_t ofs, size_t size, void* p, ll_cb* lcb)
-{
-    CHECK_FILE(f)
+int ll_start_io(File* const f, const off_t ofs, size_t size, void* const p, ll_cb* const lcb)
+{
+    CHECK_FILE(f);

     if(size == 0)

@@ -477 +480 @@
         return ERR_INVALID_PARAM;
     }
-    if(ofs >= f->size)
-    {
-        debug_warn("ll_start_io: ofs beyond f->size");
-        return -1;
-    }
-
-    off_t bytes_left = f->size - ofs;    // > 0
-    int op = (f->flags & FILE_WRITE)? LIO_WRITE : LIO_READ;
+
+    const int op = (f->flags & FILE_WRITE)? LIO_WRITE : LIO_READ;

     // cut off at EOF.
     // avoid min() due to type conversion warnings.
+    const off_t bytes_left = f->size - ofs;
+    if(bytes_left < 0)
+        return ERR_EOF;
     if((off_t)size > bytes_left)
         size = (size_t)bytes_left;

@@ -507 +507 @@
 // as a convenience, return a pointer to the transfer buffer
 // (rather than expose the ll_cb internals)
-ssize_t ll_wait_io(ll_cb* lcb, void*& p)
+ssize_t ll_wait_io(ll_cb* const lcb, void*& p)
 {
     aiocb* cb = &lcb->cb;

@@ -555 +555 @@

     // make sure block_num fits in 32 bits
-    size_t block_num = ofs / BLOCK_SIZE;
+    const size_t block_num = ofs / BLOCK_SIZE;
     assert(block_num <= 0xffffffff);

@@ -574 +574 @@
     get_mem_status();
     // TODO: calculate size
-    size_t num_blocks = 16;
+    const size_t num_blocks = 16;

     // evil: waste some mem (up to one block) to make sure the first block

@@ -581 +581 @@
     //
     // allocator will free the whole thing at exit.
-    void* pool = mem_alloc((num_blocks+1) * BLOCK_SIZE);
+    void* const pool = mem_alloc((num_blocks+1) * BLOCK_SIZE);
     if(!pool)
         return 0;

-    uintptr_t start = round_up((uintptr_t)pool + 1, BLOCK_SIZE);
+    const uintptr_t start = round_up((uintptr_t)pool + 1, BLOCK_SIZE);
     // +1 => if already block-aligned, add a whole block!

@@ -601 +601 @@
     }

-    void** entry = c.assign(id);
+    void** const entry = c.assign(id);
     if(!entry)
         return 0;
-    void* block = *entry;
+    void* const block = *entry;

     if(c.lock(id, true) < 0)

@@ -624 +624 @@
 static int block_retrieve(const u64 id, void*& p)
 {
-    void** entry = c.retrieve(id);
+    void** const entry = c.retrieve(id);
     if(entry)
     {

@@ -656 +656 @@
 int file_free_buf(void*& p)
 {
-    uintptr_t _p = (uintptr_t)p;
-    void* actual_p = (void*)(_p - (_p % BLOCK_SIZE));    // round down
+    const uintptr_t _p = (uintptr_t)p;
+    void* const actual_p = (void*)(_p - (_p % BLOCK_SIZE));    // round down

     return mem_free(actual_p);

@@ -797 +797 @@


-static int io_free(Handle hio)
+static int io_free(const Handle hio)
 {
     H_DEREF(hio, IO, io);

@@ -833 +833 @@
 struct FindBlock : public std::binary_function<Handle, u64, bool>
 {
-    bool operator()(Handle hio, u64 block_id) const
+    bool operator()(const Handle hio, const u64 block_id) const
     {
         // can't use H_DEREF - we return bool

@@ -846 +846 @@
 };

-static Handle io_find(u64 block_id)
+static Handle io_find(const u64 block_id)
 {
     IOList::const_iterator it;

@@ -871 +871 @@
 // transfers of more than 1 block (including padding) are allowed, but do not
 // go through the cache. don't see any case where that's necessary, though.
-Handle file_start_io(File* f, off_t user_ofs, size_t user_size, void* user_p)
+Handle file_start_io(File* const f, const off_t user_ofs, size_t user_size, void* const user_p)
 {
     int err;

-    CHECK_FILE(f)
+    CHECK_FILE(f);

     if(user_size == 0)

@@ -882 +882 @@
         return ERR_INVALID_PARAM;
     }
-    if(user_ofs >= f->size)
-    {
-        debug_warn("file_start_io: user_ofs beyond f->size");
-        return -1;
-    }
-
-    const off_t bytes_left = f->size - user_ofs;    // > 0
-    int op = (f->flags & FILE_WRITE)? LIO_WRITE : LIO_READ;
-
-    // don't read beyond EOF
-    if((off_t)user_size > bytes_left)    // avoid min() - it wants int
+
+    const int op = (f->flags & FILE_WRITE)? LIO_WRITE : LIO_READ;
+
+    // cut off at EOF.
+    // avoid min() due to type conversion warnings.
+    const off_t bytes_left = f->size - user_ofs;
+    if(bytes_left < 0)
+        return ERR_EOF;
+    if((off_t)user_size > bytes_left)
         user_size = (size_t)bytes_left;
-    // guaranteed to fit in user_size, since user_size > bytes_left
+    // guaranteed to fit, since size was > bytes_left

     u64 block_id = block_make_id(f->fn_hash, user_ofs);

@@ -925 +922 @@

     off_t ofs = user_ofs;
-    size_t padding = ofs % BLOCK_SIZE;
+    const size_t padding = ofs % BLOCK_SIZE;
     ofs -= (off_t)padding;
-    size_t size = round_up(padding + user_size, BLOCK_SIZE);
+    const size_t size = round_up(padding + user_size, BLOCK_SIZE);


@@ -1072 +1069 @@
 #endif

-    CHECK_FILE(f)
+    CHECK_FILE(f);

     const bool is_write = (f->flags == FILE_WRITE);

@@ -1088 +1085 @@
     if(bytes_left < 0)
         return ERR_EOF;
-    if((off_t)raw_size > bytes_left)
+    if((off_t)raw_size > bytes_left)
         raw_size = (size_t)bytes_left;
     // guaranteed to fit, since size was > bytes_left

@@ -1289 +1286 @@


-int file_map(File* f, void*& p, size_t& size)
-{
-    CHECK_FILE(f)
-
+// no significance aside from preventing uint overflow.
+static const uint MAX_MAP_REFS = 255;
+
+
+// map the entire file <f> into memory. if already currently mapped,
+// return the previous mapping (reference-counted).
+// output parameters are zeroed on failure.
+//
+// the mapping will be removed (if still open) when its file is closed.
+// however, map/unmap calls should still be paired so that the mapping
+// may be removed when no longer needed.
+//
+// rationale: reference counting is required for zip_map: several
+// Zip "mappings" each reference one ZArchive's actual file mapping.
+// implement it here so that we also get refcounting for normal files.
+int file_map(File* const f, void*& p, size_t& size)
+{
+    p = 0;
+    size = 0;
+
+    CHECK_FILE(f);
+
+    // already mapped - increase refcount and return previous mapping.
+    if(f->mapping)
+    {
+        // prevent overflow; if we have this many refs, should find out why.
+        if(f->map_refs >= MAX_MAP_REFS)
+        {
+            debug_warn("file_map: too many references to mapping");
+            return -1;
+        }
+        f->map_refs++;
+        goto have_mapping;
+    }
+
+    const int prot = (f->flags & FILE_WRITE)? PROT_WRITE : PROT_READ;
+    f->mapping = mmap((void*)0, size, prot, MAP_PRIVATE, f->fd, (off_t)0);
+    if(!f->mapping)
+        return ERR_NO_MEM;
+
+    f->map_refs = 1;
+
+have_mapping:
     p = f->mapping;
     size = f->size;
-
-    // already mapped - done
-    if(p)
-        return 0;
-
-    int prot = (f->flags & FILE_WRITE)? PROT_WRITE : PROT_READ;
-
-    p = f->mapping = mmap((void*)0, (uint)size, prot, MAP_PRIVATE, f->fd, (long)0);
-    if(!p)
-    {
-        size = 0;
-        return ERR_NO_MEM;
-    }
-
     return 0;
 }


-int file_unmap(File* f)
-{
-    CHECK_FILE(f)
-
-    void* p = f->mapping;
+// decrement the reference count for the mapping belonging to file <f>.
+// fail if there are no references; remove the mapping if the count reaches 0.
+//
+// the mapping will be removed (if still open) when its file is closed.
+// however, map/unmap calls should still be paired so that the mapping
+// may be removed when no longer needed.
+int file_unmap(File* const f)
+{
+    CHECK_FILE(f);
+
+    void* const p = f->mapping;
+    f->mapping = 0;
+    // don't reset size - the file is still open.
+
     // not currently mapped
     if(!p)
         return -1;
-    f->mapping = 0;
-    // don't reset size - the file is still open.

     return munmap(p, (uint)f->size);
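The point of the new map_refs field is easiest to see from the caller's side. The sketch below is not part of the changeset: it assumes the File API from file.h as changed here, a hypothetical "texture.dds" path, and 0 as the read-only open flag, and it follows the paired map/unmap contract documented in the comments above.

// usage sketch only (not from the changeset); assumes the file.h API above.
#include "file.h"

static int use_mapping_twice()
{
    File f;
    if(file_open("texture.dds", 0, &f) < 0)    // path and flag value are assumptions
        return -1;

    void* p1; size_t size1;
    void* p2; size_t size2;

    // first map creates the mmap and sets map_refs = 1.
    if(file_map(&f, p1, size1) < 0)
        return -1;
    // second map returns the same mapping; map_refs is now 2 (p1 == p2).
    if(file_map(&f, p2, size2) < 0)
        return -1;

    // ... read from p1 / p2 ...

    // per the documented contract, each map is paired with an unmap;
    // the underlying mapping goes away once the count drops to 0
    // (or at the latest when the file is closed).
    file_unmap(&f);
    file_unmap(&f);

    return file_close(&f);
}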
ps/trunk/source/lib/res/file.h
--- r371
+++ r373

@@ -34 +34 @@
     // it is accessed by VFS and must be the same for both (union).
     // dirty, but necessary because VFile is pushing the HDATA size limit.
-    int flags;
+    uint flags;
     off_t size;

@@ -40 +40 @@

     void* mapping;
+    uint map_refs;
+
     int fd;
 };

@@ -89 +91 @@
 extern int file_close(File* f);

+
+//
+// memory mapping
+//
+
+// map the entire file <f> into memory. if already currently mapped,
+// return the previous mapping (reference-counted).
+// output parameters are zeroed on failure.
+//
+// the mapping will be removed (if still open) when its file is closed.
+// however, map/unmap calls should still be paired so that the mapping
+// may be removed when no longer needed.
+//
+// rationale: reference counting is required for zip_map: several
+// Zip "mappings" each reference one ZArchive's actual file mapping.
+// implement it here so that we also get refcounting for normal files.
 extern int file_map(File* f, void*& p, size_t& size);
+
+// decrement the reference count for the mapping belonging to file <f>.
+// fail if there are no references; remove the mapping if the count reaches 0.
+//
+// the mapping will be removed (if still open) when its file is closed.
+// however, map/unmap calls should still be paired so that the mapping
+// may be removed when no longer needed.
 extern int file_unmap(File* f);
+
+
+//
+// async IO
+//

 extern Handle file_start_io(File* f, off_t ofs, size_t size, void* buf);
ps/trunk/source/lib/res/vfs.cpp
--- r371
+++ r373

@@ -963 +963 @@


-static int& vf_flags(VFile* vf)
+static uint& vf_flags(VFile* vf)
 {
     assert(offsetof(struct File, flags) == offsetof(struct ZFile, flags));

@@ -973 +973 @@
 static void VFile_init(VFile* vf, va_list args)
 {
-    int flags = va_arg(args, int);
+    uint flags = va_arg(args, int);
     vf_flags(vf) = flags;

@@ -980 +980 @@
 static void VFile_dtor(VFile* vf)
 {
-    int& flags = vf_flags(vf);
+    uint& flags = vf_flags(vf);

     if(flags & VF_OPEN)

@@ -999 +999 @@
 static int VFile_reload(VFile* vf, const char* path)
 {
-    int& flags = vf_flags(vf);
+    uint& flags = vf_flags(vf);

     // we're done if file is already open. need to check this because reload order

@@ -1133 +1133 @@


-int vfs_store(const char* fn, void* p, size_t size)
+int vfs_store(const char* const fn, void* p, const size_t size)
 {
     Handle hf = vfs_open(fn, VFS_WRITE);

@@ -1139 +1139 @@
         return (int)hf;    // error code
     H_DEREF(hf, VFile, vf);
-    int ret = vfs_io(hf, 0, size, p);
+    const int ret = vfs_io(hf, 0, size, p);
     vfs_close(hf);
     return ret;

@@ -1145 +1145 @@


-
-
-int vfs_map(const Handle hf, uint flags, void*& p, size_t& size)
-{
-    H_DEREF(hf, VFile, vf);
-
-    if(vf_flags(vf) & VF_ZIP)
-        CHECK_ERR(zip_map(&vf->zf, p, size));
-    else
-        CHECK_ERR(file_map(&vf->f, p, size));
-    return 0;
-}
-
-
-int vfs_unmap(Handle hf)
-{
+//
+// memory mapping
+//
+
+// map the entire file <hf> into memory. if already currently mapped,
+// return the previous mapping (reference-counted).
+// output parameters are zeroed on failure.
+//
+// the mapping will be removed (if still open) when its file is closed.
+// however, map/unmap calls should still be paired so that the mapping
+// may be removed when no longer needed.
+int vfs_map(const Handle hf, const uint flags, void*& p, size_t& size)
+{
+    UNUSED(flags);
+
+    p = 0;
+    size = 0;
+    // need to zero these here in case H_DEREF fails
+
     H_DEREF(hf, VFile, vf);
     if(vf_flags(vf) & VF_ZIP)
-        CHECK_ERR(zip_unmap(&vf->zf));
+        return zip_map(&vf->zf, p, size);
     else
-        CHECK_ERR(file_unmap(&vf->f));
-    return 0;
-}
+        return file_map(&vf->f, p, size);
+}
+
+
+// decrement the reference count for the mapping belonging to file <f>.
+// fail if there are no references; remove the mapping if the count reaches 0.
+//
+// the mapping will be removed (if still open) when its file is closed.
+// however, map/unmap calls should still be paired so that the mapping
+// may be removed when no longer needed.
+int vfs_unmap(const Handle hf)
+{
+    H_DEREF(hf, VFile, vf);
+    if(vf_flags(vf) & VF_ZIP)
+        return zip_unmap(&vf->zf);
+    else
+        return file_unmap(&vf->f);
+}
ps/trunk/source/lib/res/vfs.h
--- r371
+++ r373

@@ -42 +42 @@
 extern int vfs_close(Handle& h);

+
+//
+// memory mapping
+//
+
+// map the entire file <hf> into memory. if already currently mapped,
+// return the previous mapping (reference-counted).
+// output parameters are zeroed on failure.
+//
+// the mapping will be removed (if still open) when its file is closed.
+// however, map/unmap calls should still be paired so that the mapping
+// may be removed when no longer needed.
 extern int vfs_map(Handle hf, uint flags, void*& p, size_t& size);
+
+// decrement the reference count for the mapping belonging to file <f>.
+// fail if there are no references; remove the mapping if the count reaches 0.
+//
+// the mapping will be removed (if still open) when its file is closed.
+// however, map/unmap calls should still be paired so that the mapping
+// may be removed when no longer needed.
 extern int vfs_unmap(Handle hf);

+
+//
+// directory entry enumeration
+//

 struct vfsDirEnt

@@ -55 +78 @@
 };

-
 extern Handle vfs_open_dir(const char* path);
 extern int vfs_close_dir(Handle& hd);
 extern int vfs_next_dirent(Handle hd, vfsDirEnt* ent, const char* filter);
-
-
-
-
-extern int vfs_rebuild();
-
-


@@ -90 +105 @@


+
+extern int vfs_rebuild();
+
+
 #endif    // #ifndef __VFS_H__
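At the VFS level the same pairing applies whether the handle refers to a loose file or a Zip member; vfs_map simply dispatches on VF_ZIP. Below is a rough usage sketch, not from the changeset - the file name handling and the read-mode flag value passed to vfs_open are assumptions:

// usage sketch only; assumes the vfs.h API above.
#include "vfs.h"

static int with_mapped_file(const char* vfs_fn)
{
    Handle hf = vfs_open(vfs_fn, 0);    // 0 = read-only; flag value assumed
    if(hf <= 0)
        return (int)hf;    // error code

    void* p;
    size_t size;
    // dispatches to zip_map or file_map; both zero p/size on failure.
    int err = vfs_map(hf, 0, p, size);
    if(err == 0)
    {
        // ... use the size bytes at p ...
        vfs_unmap(hf);    // paired with vfs_map, per the comments above
    }

    vfs_close(hf);
    return err;
}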
ps/trunk/source/lib/res/zip.cpp
--- r371
+++ r373

@@ -731 +731 @@


+enum ZFileFlags
+{
+    // the ZFile has been successfully zip_map-ped.
+    // we store this so that the archive mapping refcount remains balanced.
+    ZF_HAS_MAPPING = 0x4000
+};
+
 // marker for ZFile struct, to make sure it's valid
+#ifdef PARANOIA
 static const u32 ZFILE_MAGIC = FOURCC('Z','F','I','L');
+#endif


@@ -947 +956 @@


+// map the entire file <zf> into memory. mapping compressed files
+// isn't allowed, since the compression algorithm is unspecified.
+// output parameters are zeroed on failure.
+//
+// the mapping will be removed (if still open) when its file is closed.
+// however, map/unmap calls should still be paired so that the mapping
+// may be removed when no longer needed.
 int zip_map(ZFile* const zf, void*& p, size_t& size)
 {
+    p = 0;
+    size = 0;
+
     CHECK_ZFILE(zf)

-    // doesn't really make sense to map compressed files, so disallow it.
+    // mapping compressed files doesn't make sense because the
+    // compression algorithm is unspecified - disallow it.
     if(is_compressed(zf))
     {
-        debug_warn("mapping a compressed file from archive. why?");
+        debug_warn("zip_map: file is compressed");
         return -1;
     }

     H_DEREF(zf->ha, ZArchive, za)
-    // increase refs
-
-    return file_map(&za->f, p, size);
-}
-
-
+    CHECK_ERR(file_map(&za->f, p, size));
+
+    zf->flags |= ZF_HAS_MAPPING;
+    return 0;
+}
+
+
+// remove the mapping of file <zf>; fail if not mapped.
+//
+// the mapping will be removed (if still open) when its archive is closed.
+// however, map/unmap calls should be paired so that the archive mapping
+// may be removed when no longer needed.
 int zip_unmap(ZFile* const zf)
 {
     CHECK_ZFILE(zf)
+
+    // make sure archive mapping refcount remains balanced:
+    // don't allow multiple unmaps.
+    if(!(zf->flags & ZF_HAS_MAPPING))
+        return -1;
+    zf->flags &= ~ZF_HAS_MAPPING;
+
     H_DEREF(zf->ha, ZArchive, za)
-    // decrement refs
-    // unmap archive if 0
-    return 0;
-}
+    return file_unmap(&za->f);
+}
ps/trunk/source/lib/res/zip.h
--- r371
+++ r373

@@ -74 +74 @@
     // it is accessed by VFS and must be the same for both (union).
     // dirty, but necessary because VFile is pushing the HDATA size limit.
-    int flags;
+    uint flags;
     size_t ucsize;
     // size of logical file

@@ -96 +96 @@


+//
+// memory mapping
+//
+
+// map the entire file <zf> into memory. mapping compressed files
+// isn't allowed, since the compression algorithm is unspecified.
+// output parameters are zeroed on failure.
+//
+// the mapping will be removed (if still open) when its archive is closed.
+// however, map/unmap calls should still be paired so that the archive mapping
+// may be removed when no longer needed.
 extern int zip_map(ZFile* zf, void*& p, size_t& size);
+
+// remove the mapping of file <zf>; fail if not mapped.
+//
+// the mapping will be removed (if still open) when its archive is closed.
+// however, map/unmap calls should be paired so that the archive mapping
+// may be removed when no longer needed.
 extern int zip_unmap(ZFile* zf);

-// read from file <zf>, starting at offset <ofs> in the compressed data
+// read from file <zf>, starting at offset <ofs> in the compressed data.
 extern ssize_t zip_read(ZFile* zf, off_t ofs, size_t size, void*& p);
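For files inside archives, the new ZF_HAS_MAPPING flag only matters to callers in that map/unmap must stay paired; a second unmap is rejected so the archive's mapping refcount stays balanced. A minimal sketch (not from the changeset), assuming zf is a ZFile that was already opened from an archive by the usual open call:

// usage sketch only; assumes the zip.h API above and an already-open ZFile.
#include "zip.h"

static int read_via_mapping(ZFile* zf)
{
    void* p;
    size_t size;

    // fails (and zeroes p/size) if the archived file is stored compressed.
    if(zip_map(zf, p, size) < 0)
        return -1;

    // ... read the size uncompressed bytes at p ...

    // paired unmap keeps the archive's mapping refcount balanced;
    // a second zip_unmap on the same ZFile would return -1.
    return zip_unmap(zf);
}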