- Timestamp: 06/02/04 22:41:05 (21 years ago)
- Location: ps/trunk/source/lib/res
- Files: 7 edited
Legend:
- Unmodified (context, prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
ps/trunk/source/lib/res/file.cpp
r353 → r371

@@ -189 +189 @@
     const std::string name;
     const uint flags;
-    const ssize_t size;
-
-    DirEnt(const char* const _name, const uint _flags, const ssize_t _size)
+    const off_t size;
+
+    DirEnt(const char* const _name, const uint _flags, const off_t _size)
         : name(_name), flags(_flags), size(_size) {}
 };
 
+// pointer to DirEnt: faster sorting, but more allocs.
 typedef std::vector<const DirEnt*> DirEnts;
 typedef DirEnts::const_iterator DirEntIt;

@@ -254 +255 @@
 
     uint flags = 0;
-    ssize_t size = s.st_size;
+    off_t size = s.st_size;
 
     // dir

@@ -400 +401 @@
 {
     // don't stat if opening for writing - the file may not exist yet
-    size_t size = 0;
+    off_t size = 0;
 
     int mode = O_RDONLY;

@@ -467 +468 @@
 
 
-int ll_start_io(File* f, size_t ofs, size_t size, void* p, ll_cb* lcb)
+int ll_start_io(File* f, off_t ofs, size_t size, void* p, ll_cb* lcb)
 {
     CHECK_FILE(f)

@@ -482 +483 @@
     }
 
-    size_t bytes_left = f->size - ofs;  // > 0
+    off_t bytes_left = f->size - ofs;  // > 0
     int op = (f->flags & FILE_WRITE)? LIO_WRITE : LIO_READ;
 
-    // don't read beyond EOF
-    if(size > bytes_left)  // avoid min() - it wants int
-        size = bytes_left;
+    // cut off at EOF.
+    // avoid min() due to type conversion warnings.
+    if((off_t)size > bytes_left)
+        size = (size_t)bytes_left;
+        // guaranteed to fit, since size was > bytes_left
 
     aiocb* cb = &lcb->cb;

@@ -496 +499 @@
     cb->aio_fildes = f->fd;
     cb->aio_offset = (off_t)ofs;
-    cb->aio_nbytes = size;
+    cb->aio_nbytes = (size_t)size;
     return lio_listio(LIO_NOWAIT, &cb, 1, (struct sigevent*)0);
     // this just issues the I/O - doesn't wait until complete.

@@ -537 +540 @@
 // create an id for use with the Cache that uniquely identifies
 // the block from the file <fn_hash> containing <ofs>.
-static u64 block_make_id(const u32 fn_hash, const size_t ofs)
+static u64 block_make_id(const u32 fn_hash, const off_t ofs)
 {
     // id format: filename hash | block number

@@ -685 +688 @@
 
     void* user_p;
-    size_t user_ofs;
+    off_t user_ofs;
     size_t user_size;
 

@@ -701 +704 @@
 static void IO_init(IO* io, va_list args)
 {
-    size_t size = round_up(sizeof(struct ll_cb), 16);
-    io->cb = (ll_cb*)mem_alloc(size, 16, MEM_ZERO);
+    const size_t cb_size = round_up(sizeof(struct ll_cb), 16);
+    io->cb = (ll_cb*)mem_alloc(cb_size, 16, MEM_ZERO);
 }
 

@@ -868 +871 @@
 // transfers of more than 1 block (including padding) are allowed, but do not
 // go through the cache. don't see any case where that's necessary, though.
-Handle file_start_io(File* f, size_t user_ofs, size_t user_size, void* user_p)
+Handle file_start_io(File* f, off_t user_ofs, size_t user_size, void* user_p)
 {
     int err;

@@ -885 +888 @@
 
 
-    const size_t bytes_left = f->size - user_ofs;  // > 0
+    const off_t bytes_left = f->size - user_ofs;  // > 0
     int op = (f->flags & FILE_WRITE)? LIO_WRITE : LIO_READ;
 
     // don't read beyond EOF
-    if(user_size > bytes_left)  // avoid min() - it wants int
-        user_size = bytes_left;
+    if((off_t)user_size > bytes_left)  // avoid min() - it wants int
+        user_size = (size_t)bytes_left;
+        // guaranteed to fit in user_size, since user_size > bytes_left
 
 

@@ -920 +924 @@
     // if not, no loss - the buffer will be LRU, and reused.
 
-    size_t ofs = user_ofs;
+    off_t ofs = user_ofs;
     size_t padding = ofs % BLOCK_SIZE;
-    ofs -= padding;
+    ofs -= (off_t)padding;
     size_t size = round_up(padding + user_size, BLOCK_SIZE);
 

@@ -1061 +1065 @@
 // return (positive) number of raw bytes transferred if successful;
 // otherwise, an error code.
-ssize_t file_io(File* const f, const size_t raw_ofs, size_t raw_size, void** const p,
+ssize_t file_io(File* const f, const off_t raw_ofs, size_t raw_size, void** const p,
                 const FILE_IO_CB cb, const uintptr_t ctx)  // optional
 {

@@ -1079 +1083 @@
     if(!is_write)
     {
-        if(raw_ofs >= f->size)
+        // cut off at EOF.
+        // avoid min() due to type conversion warnings.
+        off_t bytes_left = f->size - raw_ofs;
+        if(bytes_left < 0)
             return ERR_EOF;
-        raw_size = MIN(f->size - raw_ofs, raw_size);
+        if((off_t)raw_size > bytes_left)
+            raw_size = (size_t)bytes_left;
+            // guaranteed to fit, since size was > bytes_left
 
     // writing: make sure buffer is valid

@@ -1099 +1108 @@
     // not aligned! aio takes care of initial unalignment;
     // next read will be aligned, because we read up to the next block.
-    const size_t start_ofs = raw_ofs;
+    const off_t start_ofs = raw_ofs;
 
 

@@ -1188 +1197 @@
         // calculate issue_size:
         // at most, transfer up to the next block boundary.
-        size_t issue_ofs = start_ofs + issue_cnt;
+        off_t issue_ofs = (off_t)(start_ofs + issue_cnt);
        const size_t left_in_block = BLOCK_SIZE - (issue_ofs % BLOCK_SIZE);
        const size_t total_left = raw_size - issue_cnt;
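The recurring pattern in this file is replacing size_t/ssize_t offsets with off_t and clamping the requested transfer against the bytes remaining before EOF without calling min(). A minimal standalone sketch of that pattern follows; FileStub and clamp_to_eof are invented names for illustration, not part of the engine.

#include <sys/types.h>   // off_t, ssize_t
#include <cstddef>       // size_t

struct FileStub { off_t size; };   // hypothetical stand-in for File

// clamp a requested transfer to what is left in the file.
// returns the number of bytes that may actually be transferred,
// or -1 if ofs lies beyond the end of the file.
static ssize_t clamp_to_eof(const FileStub* f, off_t ofs, size_t size)
{
    const off_t bytes_left = f->size - ofs;
    if(bytes_left < 0)
        return -1;                   // ofs is past EOF
    // avoid min(): compare in off_t, then cast back - the value is
    // guaranteed to fit in size_t because it is smaller than <size>.
    if((off_t)size > bytes_left)
        size = (size_t)bytes_left;
    return (ssize_t)size;
}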
ps/trunk/source/lib/res/file.h
r353 → r371

@@ -35 +35 @@
     // dirty, but necessary because VFile is pushing the HDATA size limit.
     int flags;
-    size_t size;
+    off_t size;
 
     u32 fn_hash;

@@ -92 +92 @@
 extern int file_unmap(File* f);
 
-extern Handle file_start_io(File* f, size_t ofs, size_t size, void* buf);
+extern Handle file_start_io(File* f, off_t ofs, size_t size, void* buf);
 extern int file_wait_io(const Handle hio, void*& p, size_t& size);
 extern int file_discard_io(Handle& hio);

@@ -104 +104 @@
 typedef ssize_t(*FILE_IO_CB)(uintptr_t ctx, void* p, size_t size);
 
-extern ssize_t file_io(File* f, size_t ofs, size_t size, void** p,
+extern ssize_t file_io(File* f, off_t ofs, size_t size, void** p,
                        FILE_IO_CB cb = 0, uintptr_t ctx = 0);
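Since file_start_io, file_wait_io and file_discard_io keep their Handle-based protocol and only the offset type changes, a caller might look roughly like the following sketch. It assumes file.h brings in the Handle and File types and that a non-positive Handle signals failure; read_async_example is a hypothetical helper, not engine code.

#include "file.h"   // assumed to declare File, Handle and the I/O functions

// hypothetical helper: issue one read at <ofs> (now an off_t) and wait for it.
static int read_async_example(File* f, off_t ofs, size_t size, void* buf)
{
    Handle hio = file_start_io(f, ofs, size, buf);
    if(hio <= 0)                     // assumption: non-positive Handle = error
        return (int)hio;

    void* p; size_t bytes_transferred;
    int err = file_wait_io(hio, p, bytes_transferred);
    // ... consume <p> / <bytes_transferred> here ...
    file_discard_io(hio);            // release the I/O slot and its Handle
    return err;
}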
ps/trunk/source/lib/res/h_mgr.h
r334 → r371

@@ -95 +95 @@
 typedef H_VTbl* H_Type;
 
-#define H_TYPE_DEFINE(t)\
+#define H_TYPE_DEFINE(type)\
     /* forward decls */\
-    static void t##_init(t*, va_list);\
-    static int t##_reload(t*, const char*);\
-    static void t##_dtor(t*);\
-    static H_VTbl V_##t =\
+    static void type##_init(type*, va_list);\
+    static int type##_reload(type*, const char*);\
+    static void type##_dtor(type*);\
+    static H_VTbl V_##type =\
     {\
-        (void(*)(void*, va_list))t##_init,\
-        (int(*)(void*, const char*))t##_reload,\
-        (void(*)(void*))t##_dtor,\
-        sizeof(t),  /* control block size */\
-        #t          /* name */\
+        (void(*)(void*, va_list))type##_init,\
+        (int(*)(void*, const char*))type##_reload,\
+        (void(*)(void*))type##_dtor,\
+        sizeof(type),  /* control block size */\
+        #type          /* name */\
     };\
-    static H_Type H_##t = &V_##t;
+    static H_Type H_##type = &V_##type;
 
 // note: we cast to void* pointers so the functions can be declared to

@@ -115 +115 @@
 
 
-// <type>* <var> = H_USER_DATA(<h_var>, <type>)
-#define H_USER_DATA(h, type) (type*)h_user_data(h, H_##type);
-
+// convenience macro for h_user_data:
+// casts its return value to the control block type.
+// use if H_DEREF's returning a negative error code isn't acceptable.
+#define H_USER_DATA(h, type) (type*)h_user_data(h, H_##type)
+
+// even more convenient wrapper for h_user_data:
+// declares a pointer (<var>), assigns it H_USER_DATA, and has
+// the user's function return a negative error code on failure.
 #define H_DEREF(h, type, var)\
-    type* const var = (type*)h_user_data(h, H_##type);\
+    /* don't use STMT - var decl must be visible to "caller" */\
+    type* const var = H_USER_DATA(h, type);\
     if(!var)\
         return ERR_INVALID_HANDLE;

@@ -174 +180 @@
 extern Handle h_find(H_Type type, uintptr_t key);
 
-// return a pointer to handle data, or 0 on error
+// returns a void* pointer to the control block of the resource <h>,
+// or 0 on error (i.e. h is invalid or of the wrong type).
+// prefer using H_DEREF or H_USER_DATA.
 extern void* h_user_data(Handle h, H_Type type);
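The macro rewrite only renames the parameter from t to type; the new comments describe what H_USER_DATA and H_DEREF are for. A hedged sketch of how a resource type typically plugs into these macros follows; ExampleRes and example_res_size are invented names, and the include is an assumption.

#include <cstdarg>   // va_list
#include "h_mgr.h"   // assumed to declare Handle, H_TYPE_DEFINE, H_DEREF, ...

// hypothetical control block for a resource type
struct ExampleRes
{
    void* data;
    size_t size;
};

// declares ExampleRes_init/_reload/_dtor, builds V_ExampleRes and H_ExampleRes
H_TYPE_DEFINE(ExampleRes)

static void ExampleRes_init(ExampleRes* r, va_list args) { }
static int ExampleRes_reload(ExampleRes* r, const char* fn) { return 0; }
static void ExampleRes_dtor(ExampleRes* r) { }

// H_DEREF declares <r> and returns ERR_INVALID_HANDLE if <h> is bad,
// which is why the macro can't hide its statements in a do/while block.
static int example_res_size(Handle h, size_t& size)
{
    H_DEREF(h, ExampleRes, r);
    size = r->size;
    return 0;
}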
ps/trunk/source/lib/res/vfs.cpp
r362 → r371

@@ -956 +956 @@
 
 
-static size_t& vf_size(VFile* vf)
+static off_t& vf_size(VFile* vf)
 {
     assert(offsetof(struct File, size) == offsetof(struct ZFile, ucsize));

@@ -1062 +1062 @@
 
 
-ssize_t vfs_io(Handle hf, size_t ofs, size_t size, void*& p)
+ssize_t vfs_io(Handle hf, off_t ofs, size_t size, void*& p)
 {
 #ifdef PARANOIA

@@ -1147 +1147 @@
 
 
-Handle vfs_map(const char* fn, uint flags, void*& p, size_t& size)
-{
-    Handle hf = vfs_open(fn, flags);
+int vfs_map(const Handle hf, uint flags, void*& p, size_t& size)
+{
     H_DEREF(hf, VFile, vf);
-    CHECK_ERR(file_map(&vf->f, p, size));
-    MEM_DTOR dtor = 0;
-    uintptr_t ctx = 0;
-    return mem_assign(p, size, 0, dtor, ctx);
-}
-
-
-int vfs_unmap(Handle& hm)
-{
-    return -1;
-    // return h_free(hm, H_MMap);
-}
-
+
+    if(vf_flags(vf) & VF_ZIP)
+        CHECK_ERR(zip_map(&vf->zf, p, size));
+    else
+        CHECK_ERR(file_map(&vf->f, p, size));
+    return 0;
+}
+
+
+int vfs_unmap(Handle hf)
+{
+    H_DEREF(hf, VFile, vf);
+    if(vf_flags(vf) & VF_ZIP)
+        CHECK_ERR(zip_unmap(&vf->zf));
+    else
+        CHECK_ERR(file_unmap(&vf->f));
+    return 0;
+}
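vf_size now hands out an off_t& that aliases either File::size or ZFile::ucsize, which only works because the offsetof assertion shown above holds. A standalone sketch of that layout-compatibility trick, using invented stand-in structs rather than the engine's File/ZFile:

#include <cassert>
#include <cstddef>      // offsetof
#include <sys/types.h>  // off_t

// stand-ins for the engine's File / ZFile control blocks
struct PlainFile  { int fd;     off_t size;   };
struct PackedFile { int method; off_t ucsize; };

// one storage area holds either kind of file
union AnyFile { PlainFile f; PackedFile zf; };

// return a reference to whichever size field is active.
// safe only because both fields sit at the same offset.
static off_t& any_size(AnyFile* af, bool is_packed)
{
    assert(offsetof(PlainFile, size) == offsetof(PackedFile, ucsize));
    return is_packed? af->zf.ucsize : af->f.size;
}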
ps/trunk/source/lib/res/vfs.h
r353 → r371

@@ -42 +42 @@
 extern int vfs_close(Handle& h);
 
-extern Handle vfs_map(Handle hf, uint flags, void*& p, size_t& size);
+extern int vfs_map(Handle hf, uint flags, void*& p, size_t& size);
+extern int vfs_unmap(Handle hf);
 
 

@@ -71 +72 @@
 //
 
-extern Handle vfs_start_read(const Handle hf, size_t ofs, size_t& advance, void* buf);
+extern Handle vfs_start_read(const Handle hf, off_t ofs, size_t& advance, void* buf);
 extern int vfs_wait_read(Handle hr, void*& p, size_t& size);
 extern int vfs_discard_read(Handle& hr);
 
-extern ssize_t vfs_io(Handle hf, size_t ofs, size_t size, void*& p);
+extern ssize_t vfs_io(Handle hf, off_t ofs, size_t size, void*& p);
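With vfs_map now taking an already-open Handle and returning an int error code, and vfs_unmap added as its counterpart, a caller might look like this sketch. It assumes vfs_open(fn, flags) and the negative-error convention used elsewhere in this header; dump_mapped_file is a hypothetical name.

#include "vfs.h"   // assumed to declare Handle, vfs_open/close/map/unmap

// hypothetical helper: map a VFS file, inspect it, then clean up.
static int dump_mapped_file(const char* fn)
{
    Handle hf = vfs_open(fn, 0);
    if(hf <= 0)                      // assumption: non-positive Handle = error
        return (int)hf;

    void* p; size_t size;
    int err = vfs_map(hf, 0, p, size);   // previously returned its own Handle
    if(err == 0)
    {
        // ... read the mapped region via <p> / <size> ...
        vfs_unmap(hf);               // new in r371: releases the mapping
    }
    vfs_close(hf);
    return err;
}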
ps/trunk/source/lib/res/zip.cpp
r275 → r371

@@ -47 +47 @@
 struct ZFileLoc
 {
-    size_t ofs;
-    size_t csize;   // = 0 if not compressed
-    size_t ucsize;
+    off_t ofs;
+    off_t csize;    // = 0 if not compressed
+    off_t ucsize;
 
     // why csize?

@@ -109 +109 @@
             goto found_ecdr;
 
-        // check next 4 bytes (nonaligned!!)
+        // check next 4 bytes (unaligned!!)
         ecdr++;
         bytes_left--;

@@ -128 +128 @@
 // make sure the LFH fields match those passed (from the CDFH).
 // only used in PARANOIA builds - costs time when opening archives.
-static int zip_verify_lfh(const void* const file, const size_t lfh_ofs, const size_t file_ofs)
+static int zip_verify_lfh(const void* const file, const off_t lfh_ofs, const off_t file_ofs)
 {
     const char lfh_id[] = "PK\3\4";  // signature

@@ -146 +146 @@
     const u16 lfh_e_len = read_le16(lfh+28);
 
-    const size_t lfh_file_ofs = lfh_ofs + LFH_SIZE + lfh_fn_len + lfh_e_len;
+    const off_t lfh_file_ofs = lfh_ofs + LFH_SIZE + lfh_fn_len + lfh_e_len;
 
     if(file_ofs != lfh_file_ofs)

@@ -197 +197 @@
     fn_len = fn_len_;
 
-    loc->ofs = lfh_ofs + LFH_SIZE + fn_len_ + e_len;
-    loc->csize = csize_;
-    loc->ucsize = ucsize_;
+    loc->ofs = (off_t)(lfh_ofs + LFH_SIZE + fn_len_ + e_len);
+    loc->csize = (off_t)csize_;
+    loc->ucsize = (off_t)ucsize_;
 
     // performance issue: want to avoid seeking between LFHs and central dir.

@@ -857 +857 @@
     // can't fail - returned valid index above
 
-    s->st_size = (off_t)loc.ucsize;
+    s->st_size = loc.ucsize;
     return 0;
 }

@@ -875 +875 @@
 // note: we go to a bit of trouble to make sure the buffer we allocated
 // (if p == 0) is freed when the read fails.
-ssize_t zip_read(ZFile* zf, size_t raw_ofs, size_t size, void*& p)
+ssize_t zip_read(ZFile* zf, off_t raw_ofs, size_t size, void*& p)
 {
     CHECK_ZFILE(zf)

@@ -886 +886 @@
         return ERR_INVALID_HANDLE;
 
-    const size_t ofs = zf->ofs + raw_ofs;
+    const off_t ofs = zf->ofs + raw_ofs;
 
     // not compressed - just pass it on to file_io

@@ -947 +947 @@
 
 
-int zip_map(ZFile* zf, void*& p, size_t& size)
+int zip_map(ZFile* const zf, void*& p, size_t& size)
 {
     CHECK_ZFILE(zf)

@@ -959 +959 @@
 
     H_DEREF(zf->ha, ZArchive, za)
+    // increase refs
+
     return file_map(&za->f, p, size);
 }
+
+
+int zip_unmap(ZFile* const zf)
+{
+    CHECK_ZFILE(zf)
+    H_DEREF(zf->ha, ZArchive, za)
+    // decrement refs
+    // unmap archive if 0
+    return 0;
+}
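zip_verify_lfh recomputes the file-data offset as lfh_ofs + LFH_SIZE + fn_len + e_len, with the two lengths read little-endian out of the fixed 30-byte local file header. A standalone sketch of that arithmetic follows, assuming the standard ZIP LFH layout (filename length at byte 26, extra-field length at byte 28); read_le16_stub and lfh_data_ofs are illustrative stand-ins for the engine's helpers.

#include <sys/types.h>  // off_t
#include <stdint.h>

static const size_t LFH_SIZE = 30;  // fixed part of a local file header

static uint16_t read_le16_stub(const uint8_t* p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

// given the archive offset of a local file header and a pointer to it,
// return the offset at which the file's actual data begins.
static off_t lfh_data_ofs(const uint8_t* lfh, off_t lfh_ofs)
{
    const uint16_t fn_len = read_le16_stub(lfh + 26);  // filename length
    const uint16_t e_len  = read_le16_stub(lfh + 28);  // extra field length
    return lfh_ofs + (off_t)(LFH_SIZE + fn_len + e_len);
}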
ps/trunk/source/lib/res/zip.h
r223 → r371

@@ -78 +78 @@
     // size of logical file
 
-    size_t ofs;
-    size_t csize;
-    size_t last_raw_ofs;
+    off_t ofs;
+    off_t csize;
+    off_t last_raw_ofs;
 
     Handle ha;

@@ -97 +97 @@
 
 extern int zip_map(ZFile* zf, void*& p, size_t& size);
+extern int zip_unmap(ZFile* zf);
 
-extern ssize_t zip_read(ZFile* zf, size_t ofs, size_t size, void*& p);
-
-
-//--------
-
-// read from file <hz>, starting at offset <ofs> in the compressed data
-// (typically only used if the file is known to be stored).
-// p == 0: allocate, read into, and return the output buffer
-// p != 0: read into given output buffer, return handle to it
-// if file is compressed, size must be >= uncompressed file size
-// size: no input value, unless specifying an output buffer (p != 0)
-// out:
-
-
-
-
-//extern void* zip_mmap(
-
-
-
-
-
-
+// read from file <zf>, starting at offset <ofs> in the compressed data
+extern ssize_t zip_read(ZFile* zf, off_t ofs, size_t size, void*& p);
 
 