Line data Source code
1 : /**********************************************************************
2 : *
3 : * Name: cpl_virtualmem.cpp
4 : * Project: CPL - Common Portability Library
5 : * Purpose: Virtual memory
6 : * Author: Even Rouault, <even dot rouault at spatialys.com>
7 : *
8 : **********************************************************************
9 : * Copyright (c) 2014, Even Rouault <even dot rouault at spatialys.com>
10 : *
11 : * Permission is hereby granted, free of charge, to any person obtaining a
12 : * copy of this software and associated documentation files (the "Software"),
13 : * to deal in the Software without restriction, including without limitation
14 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 : * and/or sell copies of the Software, and to permit persons to whom the
16 : * Software is furnished to do so, subject to the following conditions:
17 : *
18 : * The above copyright notice and this permission notice shall be included
19 : * in all copies or substantial portions of the Software.
20 : *
21 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
24 : * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 : * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
26 : * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
27 : * DEALINGS IN THE SOFTWARE.
28 : ****************************************************************************/
29 :
30 : #ifndef _GNU_SOURCE
31 : #define _GNU_SOURCE
32 : #endif
33 :
34 : // to have off_t on 64bit possibly
35 : #ifndef _FILE_OFFSET_BITS
36 : #define _FILE_OFFSET_BITS 64
37 : #endif
38 :
39 : #include "cpl_virtualmem.h"
40 :
41 : #include <cassert>
42 : // TODO(schwehr): Should ucontext.h be included?
43 : // #include <ucontext.h>
44 :
45 : #include "cpl_atomic_ops.h"
46 : #include "cpl_config.h"
47 : #include "cpl_conv.h"
48 : #include "cpl_error.h"
49 : #include "cpl_multiproc.h"
50 :
51 : #ifdef NDEBUG
// NDEBUG (release builds): ignore the result.
53 : #define IGNORE_OR_ASSERT_IN_DEBUG(expr) CPL_IGNORE_RET_VAL((expr))
54 : #else
55 : // Debug: Assert.
56 : #define IGNORE_OR_ASSERT_IN_DEBUG(expr) assert((expr))
57 : #endif
58 :
59 : #if defined(__linux) && defined(CPL_MULTIPROC_PTHREAD)
60 : #ifndef HAVE_5ARGS_MREMAP
61 : // FIXME? gcore/virtualmem.py tests fail/crash when HAVE_5ARGS_MREMAP
62 : // is not defined.
63 : #warning "HAVE_5ARGS_MREMAP not found. Disabling HAVE_VIRTUAL_MEM_VMA"
64 : #else
65 : #define HAVE_VIRTUAL_MEM_VMA
66 : #endif
67 : #endif
68 :
69 : #if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
70 : #include <unistd.h> // read, write, close, pipe, sysconf
71 : #include <sys/mman.h> // mmap, munmap, mremap
72 : #endif
73 :
74 : typedef enum
75 : {
76 : VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED,
77 : VIRTUAL_MEM_TYPE_VMA
78 : } CPLVirtualMemType;
79 :
// Common base of all virtual memory objects.  CPLVirtualMemVMA embeds it
// as its first member, so pointers are reinterpret_cast'ed between the two
// types throughout this file.
struct CPLVirtualMem
{
    CPLVirtualMemType eType;

    // NOTE(review): not used in this chunk; presumably the base mapping
    // when this object is a derived view of another one -- confirm against
    // the rest of the file.
    struct CPLVirtualMem *pVMemBase;
    int nRefCount;

    CPLVirtualMemAccessMode eAccessMode;

    size_t nPageSize;
    // Aligned on nPageSize.
    void *pData;
    // Returned by mmap(), potentially lower than pData.
    void *pDataToFree;
    // Requested size (unrounded).
    size_t nSize;

    // When true, callers promise single-threaded access: the mutexes and
    // thread-registration machinery are bypassed.
    bool bSingleThreadUsage;

    // Opaque pointer handed back to the user callbacks.
    void *pCbkUserData;
    // Presumably invoked to dispose of pCbkUserData on destruction -- not
    // exercised in this chunk.
    CPLVirtualMemFreeUserData pfnFreeUserData;
};
102 :
103 : #ifdef HAVE_VIRTUAL_MEM_VMA
104 :
105 : #include <sys/select.h> // select
106 : #include <sys/stat.h> // open()
107 : #include <sys/types.h> // open()
108 : #include <errno.h>
109 : #include <fcntl.h> // open()
110 : #include <signal.h> // sigaction
111 : #include <stdio.h>
112 : #include <stdlib.h>
113 : #include <string.h>
114 : #include <pthread.h>
115 :
116 : #ifndef HAVE_5ARGS_MREMAP
117 : #include "cpl_atomic_ops.h"
118 : #endif
119 :
120 : /* Linux specific (i.e. non POSIX compliant) features used:
121 : - returning from a SIGSEGV handler is clearly a POSIX violation, but in
122 : practice most POSIX systems should be happy.
123 : - mremap() with 5 args is Linux specific. It is used when the user
124 : callback is invited to fill a page, we currently mmap() a
125 : writable page, let it filled it, and afterwards mremap() that
126 : temporary page onto the location where the fault occurred.
127 : If we have no mremap(), the workaround is to pause other threads that
128 : consume the current view while we are updating the faulted page, otherwise
129 : a non-paused thread could access a page that is in the middle of being
130 : filled... The way we pause those threads is quite original : we send them
131 : a SIGUSR1 and wait that they are stuck in the temporary SIGUSR1 handler...
132 : - MAP_ANONYMOUS isn't documented in POSIX, but very commonly found
133 : (sometimes called MAP_ANON)
134 : - dealing with the limitation of number of memory mapping regions,
135 : and the 65536 limit.
136 : - other things I've not identified
137 : */
138 :
139 : #define ALIGN_DOWN(p, pagesize) \
140 : reinterpret_cast<void *>((reinterpret_cast<GUIntptr_t>(p)) / (pagesize) * \
141 : (pagesize))
142 : #define ALIGN_UP(p, pagesize) \
143 : reinterpret_cast<void *>( \
144 : (reinterpret_cast<GUIntptr_t>(p) + (pagesize)-1) / (pagesize) * \
145 : (pagesize))
146 :
147 : #define DEFAULT_PAGE_SIZE (256 * 256)
148 : #define MAXIMUM_PAGE_SIZE (32 * 1024 * 1024)
149 :
150 : // Linux Kernel limit.
151 : #define MAXIMUM_COUNT_OF_MAPPINGS 65536
152 :
153 : #define BYEBYE_ADDR (reinterpret_cast<void *>(~static_cast<size_t>(0)))
154 :
155 : #define MAPPING_FOUND "yeah"
156 : #define MAPPING_NOT_FOUND "doh!"
157 :
158 : #define SET_BIT(ar, bitnumber) ar[(bitnumber) / 8] |= 1 << ((bitnumber) % 8)
159 : #define UNSET_BIT(ar, bitnumber) \
160 : ar[(bitnumber) / 8] &= ~(1 << ((bitnumber) % 8))
161 : #define TEST_BIT(ar, bitnumber) (ar[(bitnumber) / 8] & (1 << ((bitnumber) % 8)))
162 :
163 : typedef enum
164 : {
165 : OP_LOAD,
166 : OP_STORE,
167 : OP_MOVS_RSI_RDI,
168 : OP_UNKNOWN
169 : } OpType;
170 :
// Virtual memory region whose pages are materialized on demand: a fault on
// an unmapped page causes pfnCachePage to fill it, and resident pages are
// evicted in LRU order (see CPLVirtualMemAddPage).
typedef struct
{
    // Must be first so the struct can be used as a CPLVirtualMem.
    CPLVirtualMem sBase;

    // One bit per page: page currently mapped (filled and accessible).
    GByte *pabitMappedPages;
    // One bit per page: page mapped read-write, hence potentially dirty and
    // in need of pfnUnCachePage on eviction.
    GByte *pabitRWMappedPages;

    int nCacheMaxSizeInPages;  // Maximum size of page array.
    int *panLRUPageIndices;    // Array with indices of cached pages.
    int iLRUStart;             // Index in array where to
                               // write next page index.
    int nLRUSize;              // Current size of the array.

    int iLastPage;  // Last page accessed.
    int nRetry;     // Number of consecutive
                    // retries to that last page.

    CPLVirtualMemCachePageCbk pfnCachePage;      // Called when a page is
                                                 // mapped.
    CPLVirtualMemUnCachePageCbk pfnUnCachePage;  // Called when a (writable)
                                                 // page is unmapped.

#ifndef HAVE_5ARGS_MREMAP
    // Without 5-arg mremap(), every thread using the view must be
    // registered here so it can be paused (via SIGUSR1) while a page is
    // being filled.
    CPLMutex *hMutexThreadArray;
    int nThreads;
    pthread_t *pahThreads;
#endif
} CPLVirtualMemVMA;
199 :
// Process-wide singleton tracking all live CPLVirtualMemVMA objects, plus
// the plumbing (pipes and helper thread) used to service page faults.
typedef struct
{
    // hVirtualMemManagerMutex protects the 2 following variables.
    CPLVirtualMemVMA **pasVirtualMem;
    int nVirtualMemCount;

    // Pipes linking the SIGSEGV handler and the helper thread; the exact
    // protocol is implemented elsewhere in this file.
    int pipefd_to_thread[2];
    int pipefd_from_thread[2];
    int pipefd_wait_thread[2];
    CPLJoinableThread *hHelperThread;

    // Previous SIGSEGV disposition, restored on teardown.
    // Using sigaction without testing HAVE_SIGACTION since we are in a Linux
    // specific code path
    struct sigaction oldact;
} CPLVirtualMemManager;
215 :
// Message sent by the SIGSEGV handler to the helper thread to describe a
// page fault that must be resolved.
typedef struct
{
    void *pFaultAddr;            // Faulting address.
    OpType opType;               // Load/store classification of the
                                 // faulting instruction.
    pthread_t hRequesterThread;  // Thread that triggered the fault.
} CPLVirtualMemMsgToWorkerThread;
222 :
223 : // TODO: Singletons.
224 : static CPLVirtualMemManager *pVirtualMemManager = nullptr;
225 : static CPLMutex *hVirtualMemManagerMutex = nullptr;
226 :
227 : static bool CPLVirtualMemManagerInit();
228 :
229 : #ifdef DEBUG_VIRTUALMEM
230 :
231 : /************************************************************************/
232 : /* fprintfstderr() */
233 : /************************************************************************/
234 :
235 : // This function may be called from signal handlers where most functions
236 : // from the C library are unsafe to be called. fprintf() is clearly one
237 : // of those functions (see
238 : // http://stackoverflow.com/questions/4554129/linux-glibc-can-i-use-fprintf-in-signal-handler)
239 : // vsnprintf() is *probably* safer with respect to that (but there is no
240 : // guarantee though).
241 : // write() is async-signal-safe.
// Async-signal-safer replacement for fprintf(stderr, ...): formats into a
// fixed-size buffer with vsnprintf() and emits it with write(2), which is
// async-signal-safe.  Messages longer than the buffer are truncated.
static void fprintfstderr(const char *fmt, ...)
{
    char buffer[80] = {};
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buffer, sizeof(buffer), fmt, ap);
    va_end(ap);
    int offset = 0;
    while (true)
    {
        const size_t nSizeToWrite = strlen(buffer + offset);
        int ret = static_cast<int>(write(2, buffer + offset, nSizeToWrite));
        if (ret < 0)
        {
            // Retry only if interrupted by a signal.  On any other error
            // (e.g. EBADF, EPIPE) give up: the previous code fell through
            // to "offset += ret", adding a negative value and looping
            // forever / reading out of bounds.
            if (errno != EINTR)
                break;
        }
        else
        {
            if (ret == static_cast<int>(nSizeToWrite))
                break;
            // Partial write: continue with the unwritten tail.
            offset += ret;
        }
    }
}
265 :
266 : #endif
267 :
268 : /************************************************************************/
269 : /* CPLVirtualMemManagerRegisterVirtualMem() */
270 : /************************************************************************/
271 :
272 17 : static bool CPLVirtualMemManagerRegisterVirtualMem(CPLVirtualMemVMA *ctxt)
273 : {
274 17 : if (!CPLVirtualMemManagerInit())
275 0 : return false;
276 :
277 17 : bool bSuccess = true;
278 17 : IGNORE_OR_ASSERT_IN_DEBUG(ctxt);
279 17 : CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
280 : CPLVirtualMemVMA **pasVirtualMemNew = static_cast<CPLVirtualMemVMA **>(
281 17 : VSI_REALLOC_VERBOSE(pVirtualMemManager->pasVirtualMem,
282 : sizeof(CPLVirtualMemVMA *) *
283 : (pVirtualMemManager->nVirtualMemCount + 1)));
284 17 : if (pasVirtualMemNew == nullptr)
285 : {
286 0 : bSuccess = false;
287 : }
288 : else
289 : {
290 17 : pVirtualMemManager->pasVirtualMem = pasVirtualMemNew;
291 : pVirtualMemManager
292 17 : ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount] = ctxt;
293 17 : pVirtualMemManager->nVirtualMemCount++;
294 : }
295 17 : CPLReleaseMutex(hVirtualMemManagerMutex);
296 17 : return bSuccess;
297 : }
298 :
299 : /************************************************************************/
300 : /* CPLVirtualMemManagerUnregisterVirtualMem() */
301 : /************************************************************************/
302 :
303 17 : static void CPLVirtualMemManagerUnregisterVirtualMem(CPLVirtualMemVMA *ctxt)
304 : {
305 17 : CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
306 23 : for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
307 : {
308 23 : if (pVirtualMemManager->pasVirtualMem[i] == ctxt)
309 : {
310 17 : if (i < pVirtualMemManager->nVirtualMemCount - 1)
311 : {
312 9 : memmove(pVirtualMemManager->pasVirtualMem + i,
313 9 : pVirtualMemManager->pasVirtualMem + i + 1,
314 : sizeof(CPLVirtualMem *) *
315 9 : (pVirtualMemManager->nVirtualMemCount - i - 1));
316 : }
317 17 : pVirtualMemManager->nVirtualMemCount--;
318 17 : break;
319 : }
320 : }
321 17 : CPLReleaseMutex(hVirtualMemManagerMutex);
322 17 : }
323 :
324 : /************************************************************************/
325 : /* CPLVirtualMemNew() */
326 : /************************************************************************/
327 :
328 : static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt);
329 :
// Create an on-demand paged virtual memory area of nSize bytes.
//
// The whole range is initially mapped PROT_NONE: the first access to a
// page faults and pfnCachePage is invoked to fill it.  At most roughly
// nCacheSize bytes of pages stay resident; older pages are evicted in LRU
// order (pfnUnCachePage is called for dirty pages in READWRITE mode).
// nPageSizeHint is honored when it is a multiple of the system page size
// within [system page size, MAXIMUM_PAGE_SIZE], otherwise rounded to a
// power of two.  Returns nullptr on failure.
CPLVirtualMem *CPLVirtualMemNew(size_t nSize, size_t nCacheSize,
                                size_t nPageSizeHint, int bSingleThreadUsage,
                                CPLVirtualMemAccessMode eAccessMode,
                                CPLVirtualMemCachePageCbk pfnCachePage,
                                CPLVirtualMemUnCachePageCbk pfnUnCachePage,
                                CPLVirtualMemFreeUserData pfnFreeUserData,
                                void *pCbkUserData)
{
    size_t nMinPageSize = CPLGetPageSize();
    size_t nPageSize = DEFAULT_PAGE_SIZE;

    IGNORE_OR_ASSERT_IN_DEBUG(nSize > 0);
    IGNORE_OR_ASSERT_IN_DEBUG(pfnCachePage != nullptr);

    // Honor the hint when usable; otherwise round it up to a power of two.
    if (nPageSizeHint >= nMinPageSize && nPageSizeHint <= MAXIMUM_PAGE_SIZE)
    {
        if ((nPageSizeHint % nMinPageSize) == 0)
            nPageSize = nPageSizeHint;
        else
        {
            // Count the bit width of the hint...
            int nbits = 0;
            nPageSize = static_cast<size_t>(nPageSizeHint);
            do
            {
                nPageSize >>= 1;
                nbits++;
            } while (nPageSize > 0);
            // ...then take the nearest power of two >= the hint.
            nPageSize = static_cast<size_t>(1) << (nbits - 1);
            if (nPageSize < static_cast<size_t>(nPageSizeHint))
                nPageSize <<= 1;
        }
    }

    if ((nPageSize % nMinPageSize) != 0)
        nPageSize = nMinPageSize;

    // Clamp the cache size to [1, nSize].
    if (nCacheSize > nSize)
        nCacheSize = nSize;
    else if (nCacheSize == 0)
        nCacheSize = 1;

    int nMappings = 0;

    // Linux specific:
    // Count the number of existing memory mappings.
    FILE *f = fopen("/proc/self/maps", "rb");
    if (f != nullptr)
    {
        // NOTE(review): lines longer than 80 chars count as several
        // "mappings" here; that only over-estimates, which is safe.
        char buffer[80] = {};
        while (fgets(buffer, sizeof(buffer), f) != nullptr)
            nMappings++;
        fclose(f);
    }

    size_t nCacheMaxSizeInPages = 0;
    while (true)
    {
        // /proc/self/maps must not have more than 65K lines.
        // Grow the page size until the cache fits within ~90% of the
        // kernel limit on the number of mappings.
        nCacheMaxSizeInPages = (nCacheSize + 2 * nPageSize - 1) / nPageSize;
        if (nCacheMaxSizeInPages >
            static_cast<size_t>((MAXIMUM_COUNT_OF_MAPPINGS * 9 / 10) -
                                nMappings))
            nPageSize <<= 1;
        else
            break;
    }
    // Reserve the whole range PROT_NONE so every access faults until the
    // page is filled.  Over-allocate by up to two pages for alignment.
    size_t nRoundedMappingSize =
        ((nSize + 2 * nPageSize - 1) / nPageSize) * nPageSize;
    void *pData = mmap(nullptr, nRoundedMappingSize, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pData == MAP_FAILED)
    {
        perror("mmap");
        return nullptr;
    }
    CPLVirtualMemVMA *ctxt = static_cast<CPLVirtualMemVMA *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMemVMA)));
    if (ctxt == nullptr)
    {
        munmap(pData, nRoundedMappingSize);
        return nullptr;
    }
    ctxt->sBase.nRefCount = 1;
    ctxt->sBase.eType = VIRTUAL_MEM_TYPE_VMA;
    ctxt->sBase.eAccessMode = eAccessMode;
    ctxt->sBase.pDataToFree = pData;
    ctxt->sBase.pData = ALIGN_UP(pData, nPageSize);
    ctxt->sBase.nPageSize = nPageSize;
    ctxt->sBase.nSize = nSize;
    ctxt->sBase.bSingleThreadUsage = CPL_TO_BOOL(bSingleThreadUsage);
    ctxt->sBase.pfnFreeUserData = pfnFreeUserData;
    ctxt->sBase.pCbkUserData = pCbkUserData;

    // One bit per page in each page-state bitmap.
    ctxt->pabitMappedPages = static_cast<GByte *>(
        VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
    if (ctxt->pabitMappedPages == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    ctxt->pabitRWMappedPages = static_cast<GByte *>(
        VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
    if (ctxt->pabitRWMappedPages == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    // Need at least 2 pages in case a rep movs instruction
    // operates in the view.
    ctxt->nCacheMaxSizeInPages = static_cast<int>(nCacheMaxSizeInPages);
    ctxt->panLRUPageIndices = static_cast<int *>(
        VSI_MALLOC_VERBOSE(ctxt->nCacheMaxSizeInPages * sizeof(int)));
    if (ctxt->panLRUPageIndices == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    ctxt->iLRUStart = 0;
    ctxt->nLRUSize = 0;
    ctxt->iLastPage = -1;
    ctxt->nRetry = 0;
    ctxt->pfnCachePage = pfnCachePage;
    ctxt->pfnUnCachePage = pfnUnCachePage;

#ifndef HAVE_5ARGS_MREMAP
    // Without 5-arg mremap(), threads sharing the view are tracked so they
    // can be paused while a page is filled (see CPLVirtualMemAddPage).
    if (!ctxt->sBase.bSingleThreadUsage)
    {
        ctxt->hMutexThreadArray = CPLCreateMutex();
        IGNORE_OR_ASSERT_IN_DEBUG(ctxt->hMutexThreadArray != nullptr);
        CPLReleaseMutex(ctxt->hMutexThreadArray);
        ctxt->nThreads = 0;
        ctxt->pahThreads = nullptr;
    }
#endif

    if (!CPLVirtualMemManagerRegisterVirtualMem(ctxt))
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }

    return reinterpret_cast<CPLVirtualMem *>(ctxt);
}
477 :
478 : /************************************************************************/
479 : /* CPLVirtualMemFreeFileMemoryMapped() */
480 : /************************************************************************/
481 :
// Tear down a VMA context: unregister it, flush still-resident dirty pages
// through pfnUnCachePage, unmap the address range, and free the
// bookkeeping arrays.  Does NOT free `ctxt` itself (callers CPLFree it).
static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt)
{
    CPLVirtualMemManagerUnregisterVirtualMem(ctxt);

    size_t nRoundedMappingSize =
        ((ctxt->sBase.nSize + 2 * ctxt->sBase.nPageSize - 1) /
         ctxt->sBase.nPageSize) *
        ctxt->sBase.nPageSize;
    // Pages mapped read-write are potentially dirty: hand each one back to
    // the user callback before unmapping.
    if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
        ctxt->pabitRWMappedPages != nullptr && ctxt->pfnUnCachePage != nullptr)
    {
        for (size_t i = 0; i < nRoundedMappingSize / ctxt->sBase.nPageSize; i++)
        {
            if (TEST_BIT(ctxt->pabitRWMappedPages, i))
            {
                void *addr = static_cast<char *>(ctxt->sBase.pData) +
                             i * ctxt->sBase.nPageSize;
                ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                     i * ctxt->sBase.nPageSize, addr,
                                     ctxt->sBase.nPageSize,
                                     ctxt->sBase.pCbkUserData);
            }
        }
    }
    // Unmap from the original (unaligned) mmap() return value.
    int nRet = munmap(ctxt->sBase.pDataToFree, nRoundedMappingSize);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    CPLFree(ctxt->pabitMappedPages);
    CPLFree(ctxt->pabitRWMappedPages);
    CPLFree(ctxt->panLRUPageIndices);
#ifndef HAVE_5ARGS_MREMAP
    if (!ctxt->sBase.bSingleThreadUsage)
    {
        CPLFree(ctxt->pahThreads);
        CPLDestroyMutex(ctxt->hMutexThreadArray);
    }
#endif
}
519 :
520 : #ifndef HAVE_5ARGS_MREMAP
521 :
522 : static volatile int nCountThreadsInSigUSR1 = 0;
523 : static volatile int nWaitHelperThread = 0;
524 :
525 : /************************************************************************/
526 : /* CPLVirtualMemSIGUSR1Handler() */
527 : /************************************************************************/
528 :
// Temporary SIGUSR1 handler installed on the no-mremap code path (see
// CPLVirtualMemAddPage): each signalled thread increments a counter and
// spins until the page-filling thread clears nWaitHelperThread, which
// keeps it from observing a half-filled page.
static void CPLVirtualMemSIGUSR1Handler(int /* signum_unused */,
                                        siginfo_t * /* the_info_unused */,
                                        void * /* the_ctxt_unused */)
{
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("entering CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
    // Rouault guesses this is only POSIX correct if it is implemented by an
    // intrinsic.
    CPLAtomicInc(&nCountThreadsInSigUSR1);
    while (nWaitHelperThread)
        // Not explicitly indicated as signal-async-safe, but hopefully ok.
        usleep(1);
    CPLAtomicDec(&nCountThreadsInSigUSR1);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("leaving CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
}
547 : #endif
548 :
549 : /************************************************************************/
550 : /* CPLVirtualMemDeclareThread() */
551 : /************************************************************************/
552 :
// Register the calling thread as a user of `ctxt`.  Only meaningful for
// VMA mappings on builds without 5-arg mremap(), where the fault handler
// must be able to pause every other user thread; a no-op otherwise.
void CPLVirtualMemDeclareThread(CPLVirtualMem *ctxt)
{
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        return;
#ifndef HAVE_5ARGS_MREMAP
    CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
    IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
    CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
    // Append pthread_self() to the per-context thread array.
    ctxtVMA->pahThreads = static_cast<pthread_t *>(CPLRealloc(
        ctxtVMA->pahThreads, (ctxtVMA->nThreads + 1) * sizeof(pthread_t)));
    ctxtVMA->pahThreads[ctxtVMA->nThreads] = pthread_self();
    ctxtVMA->nThreads++;

    CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
#endif
}
569 :
570 : /************************************************************************/
571 : /* CPLVirtualMemUnDeclareThread() */
572 : /************************************************************************/
573 :
574 2 : void CPLVirtualMemUnDeclareThread(CPLVirtualMem *ctxt)
575 : {
576 2 : if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
577 0 : return;
578 : #ifndef HAVE_5ARGS_MREMAP
579 : CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
580 : pthread_t self = pthread_self();
581 : IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
582 : CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
583 : for (int i = 0; i < ctxtVMA->nThreads; i++)
584 : {
585 : if (ctxtVMA->pahThreads[i] == self)
586 : {
587 : if (i < ctxtVMA->nThreads - 1)
588 : memmove(ctxtVMA->pahThreads + i + 1, ctxtVMA->pahThreads + i,
589 : (ctxtVMA->nThreads - 1 - i) * sizeof(pthread_t));
590 : ctxtVMA->nThreads--;
591 : break;
592 : }
593 : }
594 :
595 : CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
596 : #endif
597 : }
598 :
599 : /************************************************************************/
600 : /* CPLVirtualMemGetPageToFill() */
601 : /************************************************************************/
602 :
// Return a writable page for the cache callback to fill in response to the
// fault at start_page_addr.  Must be paired with CPLVirtualMemAddPage.
// When no other thread can observe the page (single-thread mode, or a
// single declared thread on the no-mremap path), the target page itself is
// made writable and returned; otherwise a detached anonymous page is
// returned, to be remapped/copied into place by CPLVirtualMemAddPage().
// NB: on the no-mremap multi-thread path the mutex acquired here is
// released at the end of CPLVirtualMemAddPage().
static void *CPLVirtualMemGetPageToFill(CPLVirtualMemVMA *ctxt,
                                        void *start_page_addr)
{
    void *pPageToFill = nullptr;

    if (ctxt->sBase.bSingleThreadUsage)
    {
        // Fill in place: nobody else can see the partially-filled page.
        pPageToFill = start_page_addr;
        const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
                                  PROT_READ | PROT_WRITE);
        IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    }
    else
    {
#ifndef HAVE_5ARGS_MREMAP
        CPLAcquireMutex(ctxt->hMutexThreadArray, 1000.0);
        if (ctxt->nThreads == 1)
        {
            pPageToFill = start_page_addr;
            const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
                                      PROT_READ | PROT_WRITE);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
        else
#endif
        {
            // Allocate a temporary writable page that the user
            // callback can fill.
            pPageToFill =
                mmap(nullptr, ctxt->sBase.nPageSize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            IGNORE_OR_ASSERT_IN_DEBUG(pPageToFill != MAP_FAILED);
        }
    }
    return pPageToFill;
}
640 :
641 : /************************************************************************/
642 : /* CPLVirtualMemAddPage() */
643 : /************************************************************************/
644 :
// Install the freshly filled page pPageToFill at target_addr, evicting the
// least-recently-used page first if the cache is full.  Second half of the
// CPLVirtualMemGetPageToFill / CPLVirtualMemAddPage pair.
// Three code paths: single-thread (page was filled in place), 5-arg
// mremap() (temporary page is remapped onto the target), and the fallback
// that pauses all other declared threads with SIGUSR1 while memcpy'ing.
static void CPLVirtualMemAddPage(CPLVirtualMemVMA *ctxt, void *target_addr,
                                 void *pPageToFill, OpType opType,
                                 pthread_t hRequesterThread)
{
    const int iPage =
        static_cast<int>((static_cast<char *>(target_addr) -
                          static_cast<char *>(ctxt->sBase.pData)) /
                         ctxt->sBase.nPageSize);
    // Cache full: evict the oldest entry of the circular LRU array.
    if (ctxt->nLRUSize == ctxt->nCacheMaxSizeInPages)
    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
        fprintfstderr("uncaching page %d\n", iPage);
#endif
        int nOldPage = ctxt->panLRUPageIndices[ctxt->iLRUStart];
        void *addr = static_cast<char *>(ctxt->sBase.pData) +
                     nOldPage * ctxt->sBase.nPageSize;
        if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
            ctxt->pfnUnCachePage != nullptr &&
            TEST_BIT(ctxt->pabitRWMappedPages, nOldPage))
        {
            // Clip the evicted byte count so the callback never sees bytes
            // beyond the requested (unrounded) size on the last page.
            size_t nToBeEvicted = ctxt->sBase.nPageSize;
            if (static_cast<char *>(addr) + nToBeEvicted >=
                static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
                nToBeEvicted = static_cast<char *>(ctxt->sBase.pData) +
                               ctxt->sBase.nSize - static_cast<char *>(addr);

            ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                 nOldPage * ctxt->sBase.nPageSize, addr,
                                 nToBeEvicted, ctxt->sBase.pCbkUserData);
        }
        // "Free" the least recently used page.
        UNSET_BIT(ctxt->pabitMappedPages, nOldPage);
        UNSET_BIT(ctxt->pabitRWMappedPages, nOldPage);
        // Free the old page by replacing it with a fresh PROT_NONE
        // anonymous mapping, so a later access faults again.
        // Not sure how portable it is to do that that way.
        const void *const pRet =
            mmap(addr, ctxt->sBase.nPageSize, PROT_NONE,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == addr);
        // cppcheck-suppress memleak
    }
    // Record the new page in the circular LRU array.
    ctxt->panLRUPageIndices[ctxt->iLRUStart] = iPage;
    ctxt->iLRUStart = (ctxt->iLRUStart + 1) % ctxt->nCacheMaxSizeInPages;
    if (ctxt->nLRUSize < ctxt->nCacheMaxSizeInPages)
    {
        ctxt->nLRUSize++;
    }
    SET_BIT(ctxt->pabitMappedPages, iPage);

    if (ctxt->sBase.bSingleThreadUsage)
    {
        // Page was filled in place; just fix up its protection.
        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            const int nRet =
                mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
    }
    else
    {
#ifdef HAVE_5ARGS_MREMAP
        (void)hRequesterThread;

        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            // Turn the temporary page read-only before remapping it.
            // Only turn it writable when a new fault occurs (and the
            // mapping is writable).
            const int nRet =
                mprotect(pPageToFill, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
        /* Can now remap the pPageToFill onto the target page */
        const void *const pRet =
            mremap(pPageToFill, ctxt->sBase.nPageSize, ctxt->sBase.nPageSize,
                   MREMAP_MAYMOVE | MREMAP_FIXED, target_addr);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == target_addr);

#else
        if (ctxt->nThreads > 1)
        {
            /* Pause threads that share this mem view */
            CPLAtomicInc(&nWaitHelperThread);

            /* Install temporary SIGUSR1 signal handler */
            struct sigaction act, oldact;
            act.sa_sigaction = CPLVirtualMemSIGUSR1Handler;
            sigemptyset(&act.sa_mask);
            /* We don't want the sigsegv handler to be called when we are */
            /* running the sigusr1 handler */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaddset(&act.sa_mask, SIGSEGV) == 0);
            act.sa_flags = 0;
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &act, &oldact) == 0);

            for (int i = 0; i < ctxt->nThreads; i++)
            {
                if (ctxt->pahThreads[i] != hRequesterThread)
                {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("stopping thread %X\n", ctxt->pahThreads[i]);
#endif
                    IGNORE_OR_ASSERT_IN_DEBUG(
                        pthread_kill(ctxt->pahThreads[i], SIGUSR1) == 0);
                }
            }

            /* Wait that they are all paused */
            while (nCountThreadsInSigUSR1 != ctxt->nThreads - 1)
                usleep(1);

            /* Restore old SIGUSR1 signal handler */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &oldact, nullptr) ==
                                      0);

            // All other threads are parked in the SIGUSR1 handler: it is
            // now safe to copy the filled data into the target page.
            int nRet = mprotect(target_addr, ctxt->sBase.nPageSize,
                                PROT_READ | PROT_WRITE);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("memcpying page %d\n", iPage);
#endif
            memcpy(target_addr, pPageToFill, ctxt->sBase.nPageSize);

            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else
            {
                nRet = mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
            }

            /* Wake up sleeping threads */
            CPLAtomicDec(&nWaitHelperThread);
            while (nCountThreadsInSigUSR1 != 0)
                usleep(1);

            IGNORE_OR_ASSERT_IN_DEBUG(
                munmap(pPageToFill, ctxt->sBase.nPageSize) == 0);
        }
        else
        {
            // Only one declared thread: the page was filled in place by
            // CPLVirtualMemGetPageToFill, just fix up its protection.
            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
            {
                const int nRet2 =
                    mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet2 == 0);
            }
        }

        // Release the mutex acquired in CPLVirtualMemGetPageToFill().
        CPLReleaseMutex(ctxt->hMutexThreadArray);
#endif
    }
    // cppcheck-suppress memleak
}
824 :
825 : /************************************************************************/
826 : /* CPLVirtualMemGetOpTypeImm() */
827 : /************************************************************************/
828 :
829 : #if defined(__x86_64__) || defined(__i386__)
830 0 : static OpType CPLVirtualMemGetOpTypeImm(GByte val_rip)
831 : {
832 0 : OpType opType = OP_UNKNOWN;
833 0 : if ((/*val_rip >= 0x00 &&*/ val_rip <= 0x07) ||
834 0 : (val_rip >= 0x40 && val_rip <= 0x47)) // add $, (X)
835 0 : opType = OP_STORE;
836 0 : if ((val_rip >= 0x08 && val_rip <= 0x0f) ||
837 0 : (val_rip >= 0x48 && val_rip <= 0x4f)) // or $, (X)
838 0 : opType = OP_STORE;
839 0 : if ((val_rip >= 0x20 && val_rip <= 0x27) ||
840 0 : (val_rip >= 0x60 && val_rip <= 0x67)) // and $, (X)
841 0 : opType = OP_STORE;
842 0 : if ((val_rip >= 0x28 && val_rip <= 0x2f) ||
843 0 : (val_rip >= 0x68 && val_rip <= 0x6f)) // sub $, (X)
844 0 : opType = OP_STORE;
845 0 : if ((val_rip >= 0x30 && val_rip <= 0x37) ||
846 0 : (val_rip >= 0x70 && val_rip <= 0x77)) // xor $, (X)
847 0 : opType = OP_STORE;
848 0 : if ((val_rip >= 0x38 && val_rip <= 0x3f) ||
849 0 : (val_rip >= 0x78 && val_rip <= 0x7f)) // cmp $, (X)
850 0 : opType = OP_LOAD;
851 0 : return opType;
852 : }
853 : #endif
854 :
855 : /************************************************************************/
856 : /* CPLVirtualMemGetOpType() */
857 : /************************************************************************/
858 :
859 : // Don't need exhaustivity. It is just a hint for an optimization:
860 : // If the fault occurs on a store operation, then we can directly put
861 : // the page in writable mode if the mapping allows it.
862 :
863 : #if defined(__x86_64__) || defined(__i386__)
// Decode the x86 / x86-64 instruction located at the faulting instruction
// pointer to classify the access as OP_LOAD, OP_STORE or OP_MOVS_RSI_RDI
// (string move, where the fault side must be resolved from %rsi/%rdi by the
// caller).  Returns OP_UNKNOWN for any opcode not in the table; as noted
// above, this is only an optimization hint, not an exhaustive disassembler.
// NOTE(review): assumes rip points to at least 4 readable bytes of code —
// callers pass the REG_IP of a SIGSEGV context, so this holds in practice.
static OpType CPLVirtualMemGetOpType(const GByte *rip)
{
    OpType opType = OP_UNKNOWN;

#if defined(__x86_64__) || defined(__i386__)
    // Dispatch on the first opcode byte.  Prefix bytes (0x40-0x4f REX,
    // 0x66 operand-size, 0xf2/0xf3 SSE/rep) recurse one byte deeper.
    switch (rip[0])
    {
        case 0x00: /* add %al,(%rax) */
        case 0x01: /* add %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x02: /* add (%rax),%al */
        case 0x03: /* add (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x08: /* or %al,(%rax) */
        case 0x09: /* or %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x0a: /* or (%rax),%al */
        case 0x0b: /* or (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x0f:
        {
            switch (rip[1])
            {
                case 0xb6: /* movzbl (%rax),%eax */
                case 0xb7: /* movzwl (%rax),%eax */
                case 0xbe: /* movsbl (%rax),%eax */
                case 0xbf: /* movswl (%rax),%eax */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
        case 0xc6: /* movb $,(%rax) */
        case 0xc7: /* movl $,(%rax) */
            opType = OP_STORE;
            break;

        case 0x20: /* and %al,(%rax) */
        case 0x21: /* and %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x22: /* and (%rax),%al */
        case 0x23: /* and (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x28: /* sub %al,(%rax) */
        case 0x29: /* sub %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x2a: /* sub (%rax),%al */
        case 0x2b: /* sub (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x30: /* xor %al,(%rax) */
        case 0x31: /* xor %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x32: /* xor (%rax),%al */
        case 0x33: /* xor (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x38: /* cmp %al,(%rax) */
        case 0x39: /* cmp %eax,(%rax) */
            opType = OP_LOAD;
            break;
        case 0x40: /* REX prefix with no bits set: byte regs %spl etc. */
        {
            switch (rip[1])
            {
                case 0x00: /* add %spl,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x02: /* add (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x28: /* sub %spl,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x2a: /* sub (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x3a: /* cmp (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x8a: /* mov (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
#if defined(__x86_64__)
        /* REX.B/X/R prefixes: same opcodes as above but with extended
         * registers; the real opcode byte is rip[1]. */
        case 0x41: /* reg=%al/%eax, X=%r8 */
        case 0x42: /* reg=%al/%eax, X=%rax,%r8,1 */
        case 0x43: /* reg=%al/%eax, X=%r8,%r8,1 */
        case 0x44: /* reg=%r8b/%r8w, X = %rax */
        case 0x45: /* reg=%r8b/%r8w, X = %r8 */
        case 0x46: /* reg=%r8b/%r8w, X = %rax,%r8,1 */
        case 0x47: /* reg=%r8b/%r8w, X = %r8,%r8,1 */
        {
            switch (rip[1])
            {
                case 0x00: /* add regb,(X) */
                case 0x01: /* add regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x02: /* add (X),regb */
                case 0x03: /* add (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0xb6: /* movzbl (X),regl */
                        case 0xb7: /* movzwl (X),regl */
                        case 0xbe: /* movsbl (X),regl */
                        case 0xbf: /* movswl (X),regl */
                            opType = OP_LOAD;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x28: /* sub regb,(X) */
                case 0x29: /* sub regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x2a: /* sub (X),regb */
                case 0x2b: /* sub (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0x38: /* cmp regb,(X) */
                case 0x39: /* cmp regl,(X) */
                    opType = OP_LOAD;
                    break;
                case 0x80: /* cmpb,... $,(X) */
                case 0x81: /* cmpl,... $,(X) */
                case 0x83: /* cmpl,... $,(X) */
                    // Immediate-operand group: load vs store depends on the
                    // /reg field of the ModRM byte, decoded by the helper.
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;
                case 0x88: /* mov regb,(X) */
                case 0x89: /* mov regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x8a: /* mov (X),regb */
                case 0x8b: /* mov (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0xc6: /* movb $,(X) */
                case 0xc7: /* movl $,(X) */
                    opType = OP_STORE;
                    break;
                case 0x84: /* test %al,(X) */
                    opType = OP_LOAD;
                    break;
                case 0xf6: /* testb $,(X) or notb (X) */
                case 0xf7: /* testl $,(X) or notl (X)*/
                {
                    if (rip[2] < 0x10) /* test (X) */
                        opType = OP_LOAD;
                    else /* not (X) */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
        /* REX.W prefixes: 64-bit operand size; real opcode is rip[1]. */
        case 0x48: /* reg=%rax, X=%rax or %rax,%rax,1 */
        case 0x49: /* reg=%rax, X=%r8 or %r8,%rax,1 */
        case 0x4a: /* reg=%rax, X=%rax,%r8,1 */
        case 0x4b: /* reg=%rax, X=%r8,%r8,1 */
        case 0x4c: /* reg=%r8, X=%rax or %rax,%rax,1 */
        case 0x4d: /* reg=%r8, X=%r8 or %r8,%rax,1 */
        case 0x4e: /* reg=%r8, X=%rax,%r8,1 */
        case 0x4f: /* reg=%r8, X=%r8,%r8,1 */
        {
            switch (rip[1])
            {
                case 0x01: /* add reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x03: /* add (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x09: /* or reg,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x0b: /* or (%rax),reg */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0xc3: /* movnti reg,(X) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x21: /* and reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x23: /* and (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x29: /* sub reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x2b: /* sub (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x31: /* xor reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x33: /* xor (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x39: /* cmp reg,(X) */
                    opType = OP_LOAD;
                    break;

                case 0x81:
                case 0x83:
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;

                case 0x85: /* test reg,(X) */
                    opType = OP_LOAD;
                    break;

                case 0x89: /* mov reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x8b: /* mov (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0xc7: /* movq $,(X) */
                    opType = OP_STORE;
                    break;

                case 0xf7:
                {
                    if (rip[2] < 0x10) /* testq $,(X) */
                        opType = OP_LOAD;
                    else /* notq (X) */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
#endif
        case 0x66: /* operand-size prefix: 16-bit ops; real opcode is rip[1] */
        {
            switch (rip[1])
            {
                case 0x01: /* add %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x03: /* add (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0x2e: /* ucomisd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x6f: /* movdqa (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x7f: /* movdqa %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0xb6: /* movzbw (%rax),%ax */
                            opType = OP_LOAD;
                            break;
                        case 0xe7: /* movntdq %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x29: /* sub %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x2b: /* sub (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0x39: /* cmp %ax,(%rax) */
                    opType = OP_LOAD;
                    break;
#if defined(__x86_64__)
                case 0x41: /* reg = %ax (or %xmm0), X = %r8 */
                case 0x42: /* reg = %ax (or %xmm0), X = %rax,%r8,1 */
                case 0x43: /* reg = %ax (or %xmm0), X = %r8,%r8,1 */
                case 0x44: /* reg = %r8w (or %xmm8), X = %rax */
                case 0x45: /* reg = %r8w (or %xmm8), X = %r8 */
                case 0x46: /* reg = %r8w (or %xmm8), X = %rax,%r8,1 */
                case 0x47: /* reg = %r8w (or %xmm8), X = %r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x01: /* add reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x03: /* add (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0x0f:
                        {
                            switch (rip[3])
                            {
                                case 0x2e: /* ucomisd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x6f: /* movdqa (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x7f: /* movdqa reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0xb6: /* movzbw (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0xe7: /* movntdq reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        case 0x29: /* sub reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x2b: /* sub (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0x39: /* cmp reg,(X) */
                            opType = OP_LOAD;
                            break;
                        case 0x81: /* cmpw,... $,(X) */
                        case 0x83: /* cmpw,... $,(X) */
                            opType = CPLVirtualMemGetOpTypeImm(rip[3]);
                            break;
                        case 0x85: /* test reg,(X) */
                            opType = OP_LOAD;
                            break;
                        case 0x89: /* mov reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x8b: /* mov (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0xc7: /* movw $,(X) */
                            opType = OP_STORE;
                            break;
                        case 0xf7:
                        {
                            if (rip[3] < 0x10) /* testw $,(X) */
                                opType = OP_LOAD;
                            else /* notw (X) */
                                opType = OP_STORE;
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
#endif
                case 0x81: /* cmpw,... $,(%rax) */
                case 0x83: /* cmpw,... $,(%rax) */
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;

                case 0x85: /* test %ax,(%rax) */
                    opType = OP_LOAD;
                    break;
                case 0x89: /* mov %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x8b: /* mov (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0xc7: /* movw $,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0xf3:
                {
                    switch (rip[2])
                    {
                        case 0xa5: /* rep movsw %ds:(%rsi),%es:(%rdi) */
                            opType = OP_MOVS_RSI_RDI;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0xf7: /* testw $,(%rax) or notw (%rax) */
                {
                    if (rip[2] < 0x10) /* test */
                        opType = OP_LOAD;
                    else /* not */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
        case 0x80: /* cmpb,... $,(%rax) */
        case 0x81: /* cmpl,... $,(%rax) */
        case 0x83: /* cmpl,... $,(%rax) */
            opType = CPLVirtualMemGetOpTypeImm(rip[1]);
            break;
        case 0x84: /* test %al,(%rax) */
        case 0x85: /* test %eax,(%rax) */
            opType = OP_LOAD;
            break;
        case 0x88: /* mov %al,(%rax) */
            opType = OP_STORE;
            break;
        case 0x89: /* mov %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x8a: /* mov (%rax),%al */
            opType = OP_LOAD;
            break;
        case 0x8b: /* mov (%rax),%eax */
            opType = OP_LOAD;
            break;
        case 0xd9: /* 387 float */
        {
            if (rip[1] < 0x08) /* flds (%eax) */
                opType = OP_LOAD;
            else if (rip[1] >= 0x18 && rip[1] <= 0x20) /* fstps (%eax) */
                opType = OP_STORE;
            break;
        }
        case 0xf2: /* SSE 2 */
        {
            switch (rip[1])
            {
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0x10: /* movsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x11: /* movsd %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0x58: /* addsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x59: /* mulsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x5c: /* subsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x5e: /* divsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#if defined(__x86_64__)
                case 0x41: /* reg=%xmm0, X=%r8 or %r8,%rax,1 */
                case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
                case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
                case 0x44: /* reg=%xmm8, X=%rax or %rax,%rax,1*/
                case 0x45: /* reg=%xmm8, X=%r8 or %r8,%rax,1 */
                case 0x46: /* reg=%xmm8, X=%rax,%r8,1 */
                case 0x47: /* reg=%xmm8, X=%r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x0f:
                        {
                            switch (rip[3])
                            {
                                case 0x10: /* movsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x11: /* movsd reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0x58: /* addsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x59: /* mulsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x5c: /* subsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x5e: /* divsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
#endif
                default:
                    break;
            }
            break;
        }
        case 0xf3:
        {
            switch (rip[1])
            {
                case 0x0f: /* SSE 2 */
                {
                    switch (rip[2])
                    {
                        case 0x10: /* movss (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x11: /* movss %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0x6f: /* movdqu (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x7f: /* movdqu %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#if defined(__x86_64__)
                case 0x41: /* reg=%xmm0, X=%r8 */
                case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
                case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
                case 0x44: /* reg=%xmm8, X = %rax */
                case 0x45: /* reg=%xmm8, X = %r8 */
                case 0x46: /* reg=%xmm8, X = %rax,%r8,1 */
                case 0x47: /* reg=%xmm8, X = %r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x0f: /* SSE 2 */
                        {
                            switch (rip[3])
                            {
                                case 0x10: /* movss (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x11: /* movss reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0x6f: /* movdqu (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x7f: /* movdqu reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
                case 0x48:
                {
                    switch (rip[2])
                    {
                        case 0xa5: /* rep movsq %ds:(%rsi),%es:(%rdi) */
                            opType = OP_MOVS_RSI_RDI;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#endif
                case 0xa4: /* rep movsb %ds:(%rsi),%es:(%rdi) */
                case 0xa5: /* rep movsl %ds:(%rsi),%es:(%rdi) */
                    opType = OP_MOVS_RSI_RDI;
                    break;
                case 0xa6: /* repz cmpsb %es:(%rdi),%ds:(%rsi) */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
        case 0xf6: /* testb $,(%rax) or notb (%rax) */
        case 0xf7: /* testl $,(%rax) or notl (%rax) */
        {
            if (rip[1] < 0x10) /* test */
                opType = OP_LOAD;
            else /* not */
                opType = OP_STORE;
            break;
        }
        default:
            break;
    }
#endif
    return opType;
}
1518 : #endif
1519 :
1520 : /************************************************************************/
1521 : /* CPLVirtualMemManagerPinAddrInternal() */
1522 : /************************************************************************/
1523 :
1524 : static int
1525 100865 : CPLVirtualMemManagerPinAddrInternal(CPLVirtualMemMsgToWorkerThread *msg)
1526 : {
1527 100865 : char wait_ready = '\0';
1528 100865 : char response_buf[4] = {};
1529 :
1530 : // Wait for the helper thread to be ready to process another request.
1531 : while (true)
1532 : {
1533 : const int ret = static_cast<int>(
1534 100865 : read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1));
1535 100865 : if (ret < 0 && errno == EINTR)
1536 : {
1537 : // NOP
1538 : }
1539 : else
1540 : {
1541 100865 : IGNORE_OR_ASSERT_IN_DEBUG(ret == 1);
1542 100865 : break;
1543 : }
1544 0 : }
1545 :
1546 : // Pass the address that caused the fault to the helper thread.
1547 : const ssize_t nRetWrite =
1548 100865 : write(pVirtualMemManager->pipefd_to_thread[1], msg, sizeof(*msg));
1549 100865 : IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(*msg));
1550 :
1551 : // Wait that the helper thread has fixed the fault.
1552 : while (true)
1553 : {
1554 : const int ret = static_cast<int>(
1555 100865 : read(pVirtualMemManager->pipefd_from_thread[0], response_buf, 4));
1556 100865 : if (ret < 0 && errno == EINTR)
1557 : {
1558 : // NOP
1559 : }
1560 : else
1561 : {
1562 100865 : IGNORE_OR_ASSERT_IN_DEBUG(ret == 4);
1563 100865 : break;
1564 : }
1565 0 : }
1566 :
1567 : // In case the helper thread did not recognize the address as being
1568 : // one that it should take care of, just rely on the previous SIGSEGV
1569 : // handler (with might abort the process).
1570 100865 : return (memcmp(response_buf, MAPPING_FOUND, 4) == 0);
1571 : }
1572 :
1573 : /************************************************************************/
1574 : /* CPLVirtualMemPin() */
1575 : /************************************************************************/
1576 :
1577 0 : void CPLVirtualMemPin(CPLVirtualMem *ctxt, void *pAddr, size_t nSize,
1578 : int bWriteOp)
1579 : {
1580 0 : if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
1581 0 : return;
1582 :
1583 : CPLVirtualMemMsgToWorkerThread msg;
1584 :
1585 0 : memset(&msg, 0, sizeof(msg));
1586 0 : msg.hRequesterThread = pthread_self();
1587 0 : msg.opType = (bWriteOp) ? OP_STORE : OP_LOAD;
1588 :
1589 0 : char *pBase = reinterpret_cast<char *>(ALIGN_DOWN(pAddr, ctxt->nPageSize));
1590 0 : const size_t n = (reinterpret_cast<char *>(pAddr) - pBase + nSize +
1591 0 : ctxt->nPageSize - 1) /
1592 0 : ctxt->nPageSize;
1593 0 : for (size_t i = 0; i < n; i++)
1594 : {
1595 0 : msg.pFaultAddr = reinterpret_cast<char *>(pBase) + i * ctxt->nPageSize;
1596 0 : CPLVirtualMemManagerPinAddrInternal(&msg);
1597 : }
1598 : }
1599 :
1600 : /************************************************************************/
1601 : /* CPLVirtualMemManagerSIGSEGVHandler() */
1602 : /************************************************************************/
1603 :
1604 : #if defined(__x86_64__)
1605 : #define REG_IP REG_RIP
1606 : #define REG_SI REG_RSI
1607 : #define REG_DI REG_RDI
1608 : #elif defined(__i386__)
1609 : #define REG_IP REG_EIP
1610 : #define REG_SI REG_ESI
1611 : #define REG_DI REG_EDI
1612 : #endif
1613 :
1614 : // Must take care of only using "asynchronous-signal-safe" functions in a signal
1615 : // handler pthread_self(), read() and write() are such. See:
1616 : // https://www.securecoding.cert.org/confluence/display/seccode/SIG30-C.+Call+only+asynchronous-safe+functions+within+signal+handlers
// SIGSEGV handler: forwards the faulting address to the helper thread and
// blocks until it has mapped/unprotected the page, then returns to retry
// the faulting instruction.  Delegates to the previously installed handler
// when the fault is not ours.  Only async-signal-safe functions are used.
static void CPLVirtualMemManagerSIGSEGVHandler(int the_signal,
                                               siginfo_t *the_info,
                                               void *the_ctxt)
{
    CPLVirtualMemMsgToWorkerThread msg;

    memset(&msg, 0, sizeof(msg));
    msg.pFaultAddr = the_info->si_addr;
    msg.hRequesterThread = pthread_self();

#if defined(__x86_64__) || defined(__i386__)
    // Classify the faulting instruction (load vs store) from the saved
    // instruction pointer, as a hint so a writable mapping can be set up
    // in a single round-trip.
    ucontext_t *the_ucontext = static_cast<ucontext_t *>(the_ctxt);
    const GByte *rip = reinterpret_cast<const GByte *>(
        the_ucontext->uc_mcontext.gregs[REG_IP]);
    msg.opType = CPLVirtualMemGetOpType(rip);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("at rip %p, bytes: %02x %02x %02x %02x\n", rip, rip[0],
                  rip[1], rip[2], rip[3]);
#endif
    // A string-move instruction (movs) touches both %rsi (source, a load)
    // and %rdi (destination, a store): disambiguate by which register
    // matches the faulting address.
    if (msg.opType == OP_MOVS_RSI_RDI)
    {
        void *rsi =
            reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_SI]);
        void *rdi =
            reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_DI]);

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
        fprintfstderr("fault=%p rsi=%p rsi=%p\n", msg.pFaultAddr, rsi, rdi);
#endif
        if (msg.pFaultAddr == rsi)
        {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("load\n");
#endif
            msg.opType = OP_LOAD;
        }
        else if (msg.pFaultAddr == rdi)
        {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("store\n");
#endif
            msg.opType = OP_STORE;
        }
    }
#ifdef DEBUG_VIRTUALMEM
    else if (msg.opType == OP_UNKNOWN)
    {
        static bool bHasWarned = false;
        if (!bHasWarned)
        {
            bHasWarned = true;
            fprintfstderr("at rip %p, unknown bytes: %02x %02x %02x %02x\n",
                          rip, rip[0], rip[1], rip[2], rip[3]);
        }
    }
#endif
#else
    msg.opType = OP_UNKNOWN;
#endif

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("entering handler for %X (addr=%p)\n", pthread_self(),
                  the_info->si_addr);
#endif

    // Only protection faults (SEGV_ACCERR) can be ours: anything else
    // (e.g. an unmapped address, SEGV_MAPERR) goes to the previous handler.
    if (the_info->si_code != SEGV_ACCERR)
    {
        pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
        return;
    }

    if (!CPLVirtualMemManagerPinAddrInternal(&msg))
    {
        // In case the helper thread did not recognize the address as being
        // one that it should take care of, just rely on the previous SIGSEGV
        // handler (which might abort the process).
        pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
    }

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("leaving handler for %X (addr=%p)\n", pthread_self(),
                  the_info->si_addr);
#endif
}
1701 :
1702 : /************************************************************************/
1703 : /* CPLVirtualMemManagerThread() */
1704 : /************************************************************************/
1705 :
// Helper-thread main loop: serves page-fault requests sent by the SIGSEGV
// handler over pipes.  For each request it locates the owning virtual
// memory region, fills/remaps or write-enables the faulting page, and
// replies MAPPING_FOUND / MAPPING_NOT_FOUND.  Terminates upon receiving
// the BYEBYE_ADDR sentinel from CPLVirtualMemManagerTerminate().
static void CPLVirtualMemManagerThread(void * /* unused_param */)
{
    while (true)
    {
        char i_m_ready = 1;
        CPLVirtualMemVMA *ctxt = nullptr;
        bool bMappingFound = false;
        CPLVirtualMemMsgToWorkerThread msg;

        // Signal that we are ready to process a new request.
        ssize_t nRetWrite =
            write(pVirtualMemManager->pipefd_wait_thread[1], &i_m_ready, 1);
        IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 1);

        // Fetch the address to process.
        const ssize_t nRetRead =
            read(pVirtualMemManager->pipefd_to_thread[0], &msg, sizeof(msg));
        IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == sizeof(msg));

        // If CPLVirtualMemManagerTerminate() is called, it will use BYEBYE_ADDR
        // as a means to ask for our termination.
        if (msg.pFaultAddr == BYEBYE_ADDR)
            break;

        /* Lookup for a mapping that contains addr */
        CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
        for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
        {
            ctxt = pVirtualMemManager->pasVirtualMem[i];
            if (static_cast<char *>(msg.pFaultAddr) >=
                    static_cast<char *>(ctxt->sBase.pData) &&
                static_cast<char *>(msg.pFaultAddr) <
                    static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
            {
                bMappingFound = true;
                break;
            }
        }
        CPLReleaseMutex(hVirtualMemManagerMutex);

        if (bMappingFound)
        {
            // Page-aligned start of the faulting page and its index within
            // the mapping.
            char *const start_page_addr = static_cast<char *>(
                ALIGN_DOWN(msg.pFaultAddr, ctxt->sBase.nPageSize));
            const int iPage =
                static_cast<int>((static_cast<char *>(start_page_addr) -
                                  static_cast<char *>(ctxt->sBase.pData)) /
                                 ctxt->sBase.nPageSize);

            if (iPage == ctxt->iLastPage)
            {
                // In case 2 threads try to access the same page concurrently it
                // is possible that we are asked to mapped the page again
                // whereas it is always mapped. However, if that number of
                // successive retries is too high, this is certainly a sign that
                // something else happen, like trying to write-access a
                // read-only page 100 is a bit of magic number. Rouault believes
                // it must be at least the number of concurrent threads. 100
                // seems to be really safe!
                ctxt->nRetry++;
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                fprintfstderr("retry on page %d : %d\n", iPage, ctxt->nRetry);
#endif
                if (ctxt->nRetry >= 100)
                {
                    CPLError(CE_Failure, CPLE_AppDefined,
                             "CPLVirtualMemManagerThread: trying to "
                             "write into read-only mapping");
                    nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                                      MAPPING_NOT_FOUND, 4);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
                    break;
                }
                else if (msg.opType != OP_LOAD &&
                         ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                         !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                {
                    // Store on a mapped-but-read-only page: upgrade it to
                    // read/write in place.
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("switching page %d to write mode\n", iPage);
#endif
                    SET_BIT(ctxt->pabitRWMappedPages, iPage);
                    const int nRet =
                        mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                 PROT_READ | PROT_WRITE);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                }
            }
            else
            {
                ctxt->iLastPage = iPage;
                ctxt->nRetry = 0;

                if (TEST_BIT(ctxt->pabitMappedPages, iPage))
                {
                    // Page already mapped: the only legitimate fault is a
                    // store needing write access on a read/write mapping.
                    if (msg.opType != OP_LOAD &&
                        ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                        !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("switching page %d to write mode\n",
                                      iPage);
#endif
                        SET_BIT(ctxt->pabitRWMappedPages, iPage);
                        const int nRet =
                            mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                     PROT_READ | PROT_WRITE);
                        IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                    }
                    else
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("unexpected case for page %d\n", iPage);
#endif
                    }
                }
                else
                {
                    // Page not yet resident: have the user callback fill a
                    // staging page, then remap it at the target address.
                    void *const pPageToFill =
                        CPLVirtualMemGetPageToFill(ctxt, start_page_addr);

                    // Clamp the fill size for the (possibly partial) last
                    // page of the mapping.
                    size_t nToFill = ctxt->sBase.nPageSize;
                    if (start_page_addr + nToFill >=
                        static_cast<char *>(ctxt->sBase.pData) +
                            ctxt->sBase.nSize)
                    {
                        nToFill = static_cast<char *>(ctxt->sBase.pData) +
                                  ctxt->sBase.nSize - start_page_addr;
                    }

                    ctxt->pfnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                       start_page_addr - static_cast<char *>(
                                                             ctxt->sBase.pData),
                                       pPageToFill, nToFill,
                                       ctxt->sBase.pCbkUserData);

                    // Now remap this page to its target address and
                    // register it in the LRU.
                    CPLVirtualMemAddPage(ctxt, start_page_addr, pPageToFill,
                                         msg.opType, msg.hRequesterThread);
                }
            }

            // Warn the segfault handler that we have finished our job.
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
        else
        {
            // Warn the segfault handler that we have finished our job
            // but that the fault didn't occur in a memory range that
            // is under our responsibility.
            CPLError(CE_Failure, CPLE_AppDefined,
                     "CPLVirtualMemManagerThread: no mapping found");
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_NOT_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
    }
}
1866 :
1867 : /************************************************************************/
1868 : /* CPLVirtualMemManagerInit() */
1869 : /************************************************************************/
1870 :
1871 17 : static bool CPLVirtualMemManagerInit()
1872 : {
1873 34 : CPLMutexHolderD(&hVirtualMemManagerMutex);
1874 17 : if (pVirtualMemManager != nullptr)
1875 15 : return true;
1876 :
1877 : struct sigaction act;
1878 2 : pVirtualMemManager = static_cast<CPLVirtualMemManager *>(
1879 2 : VSI_MALLOC_VERBOSE(sizeof(CPLVirtualMemManager)));
1880 2 : if (pVirtualMemManager == nullptr)
1881 0 : return false;
1882 2 : pVirtualMemManager->pasVirtualMem = nullptr;
1883 2 : pVirtualMemManager->nVirtualMemCount = 0;
1884 2 : int nRet = pipe(pVirtualMemManager->pipefd_to_thread);
1885 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1886 2 : nRet = pipe(pVirtualMemManager->pipefd_from_thread);
1887 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1888 2 : nRet = pipe(pVirtualMemManager->pipefd_wait_thread);
1889 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1890 :
1891 : // Install our custom SIGSEGV handler.
1892 2 : act.sa_sigaction = CPLVirtualMemManagerSIGSEGVHandler;
1893 2 : sigemptyset(&act.sa_mask);
1894 2 : act.sa_flags = SA_SIGINFO;
1895 2 : nRet = sigaction(SIGSEGV, &act, &pVirtualMemManager->oldact);
1896 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1897 :
1898 : // Starts the helper thread.
1899 4 : pVirtualMemManager->hHelperThread =
1900 2 : CPLCreateJoinableThread(CPLVirtualMemManagerThread, nullptr);
1901 2 : if (pVirtualMemManager->hHelperThread == nullptr)
1902 : {
1903 0 : VSIFree(pVirtualMemManager);
1904 0 : pVirtualMemManager = nullptr;
1905 0 : return false;
1906 : }
1907 2 : return true;
1908 : }
1909 :
1910 : /************************************************************************/
1911 : /* CPLVirtualMemManagerTerminate() */
1912 : /************************************************************************/
1913 :
// Shut down the virtual-memory manager: ask the helper thread to exit
// (via the BYEBYE_ADDR sentinel), join it, free all remaining mappings,
// close the pipes, restore the previous SIGSEGV handler and destroy the
// mutex.  No-op if the manager was never initialized.  The handshake order
// (wait-ready, send sentinel, join) mirrors CPLVirtualMemManagerPinAddrInternal.
void CPLVirtualMemManagerTerminate(void)
{
    if (pVirtualMemManager == nullptr)
        return;

    CPLVirtualMemMsgToWorkerThread msg;
    msg.pFaultAddr = BYEBYE_ADDR;
    msg.opType = OP_UNKNOWN;
    memset(&msg.hRequesterThread, 0, sizeof(msg.hRequesterThread));

    // Wait for the helper thread to be ready.
    char wait_ready;
    const ssize_t nRetRead =
        read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1);
    IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == 1);

    // Ask it to terminate.
    const ssize_t nRetWrite =
        write(pVirtualMemManager->pipefd_to_thread[1], &msg, sizeof(msg));
    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(msg));

    // Wait for its termination.
    CPLJoinThread(pVirtualMemManager->hHelperThread);

    // Cleanup everything: CPLVirtualMemFree() unregisters each mapping
    // from pasVirtualMem, so pop from the tail until empty.
    while (pVirtualMemManager->nVirtualMemCount > 0)
        CPLVirtualMemFree(reinterpret_cast<CPLVirtualMem *>(
            pVirtualMemManager
                ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount - 1]));
    CPLFree(pVirtualMemManager->pasVirtualMem);

    close(pVirtualMemManager->pipefd_to_thread[0]);
    close(pVirtualMemManager->pipefd_to_thread[1]);
    close(pVirtualMemManager->pipefd_from_thread[0]);
    close(pVirtualMemManager->pipefd_from_thread[1]);
    close(pVirtualMemManager->pipefd_wait_thread[0]);
    close(pVirtualMemManager->pipefd_wait_thread[1]);

    // Restore previous handler.
    sigaction(SIGSEGV, &pVirtualMemManager->oldact, nullptr);

    CPLFree(pVirtualMemManager);
    pVirtualMemManager = nullptr;

    CPLDestroyMutex(hVirtualMemManagerMutex);
    hVirtualMemManagerMutex = nullptr;
}
1961 :
1962 : #else // HAVE_VIRTUAL_MEM_VMA
1963 :
1964 : CPLVirtualMem *CPLVirtualMemNew(
1965 : size_t /* nSize */, size_t /* nCacheSize */, size_t /* nPageSizeHint */,
1966 : int /* bSingleThreadUsage */, CPLVirtualMemAccessMode /* eAccessMode */,
1967 : CPLVirtualMemCachePageCbk /* pfnCachePage */,
1968 : CPLVirtualMemUnCachePageCbk /* pfnUnCachePage */,
1969 : CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
1970 : {
1971 : CPLError(CE_Failure, CPLE_NotSupported,
1972 : "CPLVirtualMemNew() unsupported on "
1973 : "this operating system / configuration");
1974 : return nullptr;
1975 : }
1976 :
// No-op in this configuration: without HAVE_VIRTUAL_MEM_VMA there is no
// per-thread registration to perform.
void CPLVirtualMemDeclareThread(CPLVirtualMem * /* ctxt */)
{
}
1980 :
// No-op counterpart of CPLVirtualMemDeclareThread() for configurations
// without HAVE_VIRTUAL_MEM_VMA.
void CPLVirtualMemUnDeclareThread(CPLVirtualMem * /* ctxt */)
{
}
1984 :
// No-op in this configuration: without HAVE_VIRTUAL_MEM_VMA pages are not
// demand-faulted, so there is nothing to pin/pre-fault here.
void CPLVirtualMemPin(CPLVirtualMem * /* ctxt */, void * /* pAddr */,
                      size_t /* nSize */, int /* bWriteOp */)
{
}
1989 :
// No-op: without HAVE_VIRTUAL_MEM_VMA no manager/helper thread is ever
// created, so there is nothing to tear down.
void CPLVirtualMemManagerTerminate(void)
{
}
1993 :
1994 : #endif // HAVE_VIRTUAL_MEM_VMA
1995 :
1996 : #ifdef HAVE_MMAP
1997 :
1998 : /************************************************************************/
1999 : /* CPLVirtualMemFreeFileMemoryMapped() */
2000 : /************************************************************************/
2001 :
2002 28 : static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMem *ctxt)
2003 : {
2004 28 : const size_t nMappingSize = ctxt->nSize +
2005 28 : static_cast<GByte *>(ctxt->pData) -
2006 28 : static_cast<GByte *>(ctxt->pDataToFree);
2007 28 : const int nRet = munmap(ctxt->pDataToFree, nMappingSize);
2008 28 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
2009 28 : }
2010 :
2011 : /************************************************************************/
2012 : /* CPLVirtualMemFileMapNew() */
2013 : /************************************************************************/
2014 :
/** Create a virtual memory mapping over a region of a file.
 *
 * Maps [nOffset, nOffset + nLength[ of fp via mmap(), page-aligning the
 * mapping start downwards and extending the file if needed (read-write
 * mode only) so that accesses cannot trigger SIGBUS.  The file position
 * of fp is preserved.
 *
 * @return a new CPLVirtualMem, or nullptr on failure (virtual file
 * without a native descriptor, extent outside a read-only file, I/O or
 * mmap() error, 32-bit overflow).
 */
CPLVirtualMem *CPLVirtualMemFileMapNew(
    VSILFILE *fp, vsi_l_offset nOffset, vsi_l_offset nLength,
    CPLVirtualMemAccessMode eAccessMode,
    CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
{
#if SIZEOF_VOIDP == 4
    // On 32-bit builds, reject lengths/offsets that do not fit into
    // size_t / off_t respectively.
    if (nLength != static_cast<size_t>(nLength))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nLength = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nLength);
        return nullptr;
    }
    if (nOffset + CPLGetPageSize() !=
        static_cast<vsi_l_offset>(
            static_cast<off_t>(nOffset + CPLGetPageSize())))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nOffset = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nOffset);
        return nullptr;
    }
#endif

    // mmap() needs a real OS file descriptor; VSI virtual files do not
    // have one (fd == 0 is treated as "none" here).
    int fd = static_cast<int>(
        reinterpret_cast<GUIntptr_t>(VSIFGetNativeFileDescriptorL(fp)));
    if (fd == 0)
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "Cannot operate on a virtual file");
        return nullptr;
    }

    // Round the mapping start down to a page boundary; nAlignment is the
    // slack between that boundary and the requested offset.
    const off_t nAlignedOffset =
        static_cast<off_t>((nOffset / CPLGetPageSize()) * CPLGetPageSize());
    size_t nAlignment = static_cast<size_t>(nOffset - nAlignedOffset);
    size_t nMappingSize = static_cast<size_t>(nLength + nAlignment);

    // Need to ensure that the requested extent fits into the file size
    // otherwise SIGBUS errors will occur when using the mapping.
    vsi_l_offset nCurPos = VSIFTellL(fp);
    if (VSIFSeekL(fp, 0, SEEK_END) != 0)
        return nullptr;
    vsi_l_offset nFileSize = VSIFTellL(fp);
    if (nFileSize < nOffset + nLength)
    {
        if (eAccessMode != VIRTUALMEM_READWRITE)
        {
            CPLError(CE_Failure, CPLE_AppDefined,
                     "Trying to map an extent outside of the file");
            CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
            return nullptr;
        }
        else
        {
            // Read-write mode: grow the file up to the end of the mapped
            // extent by writing one byte at its last position.
            char ch = 0;
            if (VSIFSeekL(fp, nOffset + nLength - 1, SEEK_SET) != 0 ||
                VSIFWriteL(&ch, 1, 1, fp) != 1)
            {
                CPLError(CE_Failure, CPLE_AppDefined,
                         "Cannot extend file to mapping size");
                CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
                return nullptr;
            }
        }
    }
    // Restore the caller's file position.
    if (VSIFSeekL(fp, nCurPos, SEEK_SET) != 0)
        return nullptr;

    CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
    if (ctxt == nullptr)
        return nullptr;

    void *addr =
        mmap(nullptr, nMappingSize,
             eAccessMode == VIRTUALMEM_READWRITE ? PROT_READ | PROT_WRITE
                                                 : PROT_READ,
             MAP_SHARED, fd, nAlignedOffset);
    if (addr == MAP_FAILED)
    {
        int myerrno = errno;
        CPLError(CE_Failure, CPLE_AppDefined, "mmap() failed : %s",
                 strerror(myerrno));
        VSIFree(ctxt);
        // cppcheck thinks we are leaking addr.
        // cppcheck-suppress memleak
        return nullptr;
    }

    // pData is the user-visible pointer (at the exact requested offset);
    // pDataToFree is the page-aligned mmap() result used by munmap().
    ctxt->eType = VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
    ctxt->nRefCount = 1;
    ctxt->eAccessMode = eAccessMode;
    ctxt->pData = static_cast<GByte *>(addr) + nAlignment;
    ctxt->pDataToFree = addr;
    ctxt->nSize = static_cast<size_t>(nLength);
    ctxt->nPageSize = CPLGetPageSize();
    ctxt->bSingleThreadUsage = false;
    ctxt->pfnFreeUserData = pfnFreeUserData;
    ctxt->pCbkUserData = pCbkUserData;

    return ctxt;
}
2120 :
2121 : #else // HAVE_MMAP
2122 :
2123 : CPLVirtualMem *CPLVirtualMemFileMapNew(
2124 : VSILFILE * /* fp */, vsi_l_offset /* nOffset */, vsi_l_offset /* nLength */,
2125 : CPLVirtualMemAccessMode /* eAccessMode */,
2126 : CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
2127 : {
2128 : CPLError(CE_Failure, CPLE_NotSupported,
2129 : "CPLVirtualMemFileMapNew() unsupported on this "
2130 : "operating system / configuration");
2131 : return nullptr;
2132 : }
2133 :
2134 : #endif // HAVE_MMAP
2135 :
2136 : /************************************************************************/
2137 : /* CPLGetPageSize() */
2138 : /************************************************************************/
2139 :
/** Return the size in bytes of a system memory page, or 0 when neither
 * mmap nor the VMA machinery is available on this platform. */
size_t CPLGetPageSize(void)
{
#if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
    return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#else
    return 0;
#endif
}
2148 :
2149 : /************************************************************************/
2150 : /* CPLIsVirtualMemFileMapAvailable() */
2151 : /************************************************************************/
2152 :
/** Return TRUE if CPLVirtualMemFileMapNew() is implemented in this build
 * (i.e. mmap() support was detected), FALSE otherwise. */
int CPLIsVirtualMemFileMapAvailable(void)
{
#ifdef HAVE_MMAP
    return TRUE;
#else
    return FALSE;
#endif
}
2161 :
2162 : /************************************************************************/
2163 : /* CPLVirtualMemFree() */
2164 : /************************************************************************/
2165 :
/** Release one reference on ctxt and destroy it when the reference count
 * drops to zero.  Safe to call with nullptr. */
void CPLVirtualMemFree(CPLVirtualMem *ctxt)
{
    // Decrement the refcount; bail out while other owners remain.
    if (ctxt == nullptr || --(ctxt->nRefCount) > 0)
        return;

    // Derived view (see CPLVirtualMemDerivedNew): it owns no mapping of
    // its own (pDataToFree is nullptr there), so just drop the reference
    // on the base object, run the user-data destructor and free the shell.
    if (ctxt->pVMemBase != nullptr)
    {
        CPLVirtualMemFree(ctxt->pVMemBase);
        if (ctxt->pfnFreeUserData != nullptr)
            ctxt->pfnFreeUserData(ctxt->pCbkUserData);
        CPLFree(ctxt);
        return;
    }

#ifdef HAVE_MMAP
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
#endif
#ifdef HAVE_VIRTUAL_MEM_VMA
    // NOTE(review): the cast presumes CPLVirtualMemVMA begins with a
    // CPLVirtualMem member (declared earlier in this file) — this selects
    // the CPLVirtualMemVMA overload of the free routine.
    if (ctxt->eType == VIRTUAL_MEM_TYPE_VMA)
        CPLVirtualMemFreeFileMemoryMapped(
            reinterpret_cast<CPLVirtualMemVMA *>(ctxt));
#endif

    if (ctxt->pfnFreeUserData != nullptr)
        ctxt->pfnFreeUserData(ctxt->pCbkUserData);
    CPLFree(ctxt);
}
2194 :
2195 : /************************************************************************/
2196 : /* CPLVirtualMemGetAddr() */
2197 : /************************************************************************/
2198 :
/** Return the start address of the user-visible data of the mapping. */
void *CPLVirtualMemGetAddr(CPLVirtualMem *ctxt)
{
    return ctxt->pData;
}
2203 :
2204 : /************************************************************************/
2205 : /* CPLVirtualMemIsFileMapping() */
2206 : /************************************************************************/
2207 :
/** Return whether ctxt is a direct file memory mapping (as opposed to a
 * VMA-managed region). */
int CPLVirtualMemIsFileMapping(CPLVirtualMem *ctxt)
{
    return ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
}
2212 :
2213 : /************************************************************************/
2214 : /* CPLVirtualMemGetAccessMode() */
2215 : /************************************************************************/
2216 :
/** Return the access mode (read-only / read-write) of the mapping. */
CPLVirtualMemAccessMode CPLVirtualMemGetAccessMode(CPLVirtualMem *ctxt)
{
    return ctxt->eAccessMode;
}
2221 :
2222 : /************************************************************************/
2223 : /* CPLVirtualMemGetPageSize() */
2224 : /************************************************************************/
2225 :
/** Return the page size, in bytes, associated with the mapping. */
size_t CPLVirtualMemGetPageSize(CPLVirtualMem *ctxt)
{
    return ctxt->nPageSize;
}
2230 :
2231 : /************************************************************************/
2232 : /* CPLVirtualMemGetSize() */
2233 : /************************************************************************/
2234 :
/** Return the size, in bytes, of the user-visible data of the mapping. */
size_t CPLVirtualMemGetSize(CPLVirtualMem *ctxt)
{
    return ctxt->nSize;
}
2239 :
2240 : /************************************************************************/
2241 : /* CPLVirtualMemIsAccessThreadSafe() */
2242 : /************************************************************************/
2243 :
/** Return whether the mapping may be accessed from several threads
 * (i.e. it was not created for single-thread usage). */
int CPLVirtualMemIsAccessThreadSafe(CPLVirtualMem *ctxt)
{
    return !ctxt->bSingleThreadUsage;
}
2248 :
2249 : /************************************************************************/
2250 : /* CPLVirtualMemDerivedNew() */
2251 : /************************************************************************/
2252 :
2253 8 : CPLVirtualMem *CPLVirtualMemDerivedNew(
2254 : CPLVirtualMem *pVMemBase, vsi_l_offset nOffset, vsi_l_offset nSize,
2255 : CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
2256 : {
2257 8 : if (nOffset + nSize > pVMemBase->nSize)
2258 0 : return nullptr;
2259 :
2260 : CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
2261 8 : VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
2262 8 : if (ctxt == nullptr)
2263 0 : return nullptr;
2264 :
2265 8 : ctxt->eType = pVMemBase->eType;
2266 8 : ctxt->nRefCount = 1;
2267 8 : ctxt->pVMemBase = pVMemBase;
2268 8 : pVMemBase->nRefCount++;
2269 8 : ctxt->eAccessMode = pVMemBase->eAccessMode;
2270 8 : ctxt->pData = static_cast<GByte *>(pVMemBase->pData) + nOffset;
2271 8 : ctxt->pDataToFree = nullptr;
2272 8 : ctxt->nSize = static_cast<size_t>(nSize);
2273 8 : ctxt->nPageSize = pVMemBase->nPageSize;
2274 8 : ctxt->bSingleThreadUsage = CPL_TO_BOOL(pVMemBase->bSingleThreadUsage);
2275 8 : ctxt->pfnFreeUserData = pfnFreeUserData;
2276 8 : ctxt->pCbkUserData = pCbkUserData;
2277 :
2278 8 : return ctxt;
2279 : }
|