Line data Source code
1 : /**********************************************************************
2 : *
3 : * Name: cpl_virtualmem.cpp
4 : * Project: CPL - Common Portability Library
5 : * Purpose: Virtual memory
6 : * Author: Even Rouault, <even dot rouault at spatialys.com>
7 : *
8 : **********************************************************************
9 : * Copyright (c) 2014, Even Rouault <even dot rouault at spatialys.com>
10 : *
11 : * SPDX-License-Identifier: MIT
12 : ****************************************************************************/
13 :
14 : #ifndef _GNU_SOURCE
15 : #define _GNU_SOURCE
16 : #endif
17 :
18 : // to have off_t on 64bit possibly
19 : #ifndef _FILE_OFFSET_BITS
20 : #define _FILE_OFFSET_BITS 64
21 : #endif
22 :
23 : #include "cpl_virtualmem.h"
24 :
25 : #include <cassert>
26 : // TODO(schwehr): Should ucontext.h be included?
27 : // #include <ucontext.h>
28 :
29 : #include "cpl_atomic_ops.h"
30 : #include "cpl_config.h"
31 : #include "cpl_conv.h"
32 : #include "cpl_error.h"
33 : #include "cpl_multiproc.h"
34 :
35 : #ifdef NDEBUG
// NDEBUG (release builds): ignore the result.
37 : #define IGNORE_OR_ASSERT_IN_DEBUG(expr) CPL_IGNORE_RET_VAL((expr))
38 : #else
39 : // Debug: Assert.
40 : #define IGNORE_OR_ASSERT_IN_DEBUG(expr) assert((expr))
41 : #endif
42 :
43 : #if defined(__linux) && defined(CPL_MULTIPROC_PTHREAD)
44 : #ifndef HAVE_5ARGS_MREMAP
45 : // FIXME? gcore/virtualmem.py tests fail/crash when HAVE_5ARGS_MREMAP
46 : // is not defined.
47 : #warning "HAVE_5ARGS_MREMAP not found. Disabling HAVE_VIRTUAL_MEM_VMA"
48 : #else
49 : #define HAVE_VIRTUAL_MEM_VMA
50 : #endif
51 : #endif
52 :
53 : #if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
54 : #include <unistd.h> // read, write, close, pipe, sysconf
55 : #include <sys/mman.h> // mmap, munmap, mremap
56 : #endif
57 :
// Implementation kind of a CPLVirtualMem object.
typedef enum
{
    // Plain memory-mapped file view.
    VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED,
    // Anonymous mapping whose pages are produced on demand by user
    // callbacks (see CPLVirtualMemVMA below).
    VIRTUAL_MEM_TYPE_VMA
} CPLVirtualMemType;
63 :
// Base state shared by every virtual memory implementation.
// CPLVirtualMemVMA embeds it as its first member so pointers can be cast
// between the two types.
struct CPLVirtualMem
{
    // Discriminates file-backed vs callback-serviced (VMA) mappings.
    CPLVirtualMemType eType;

    // NOTE(review): appears to link a derived view to the mapping it was
    // created from; not exercised in this part of the file -- confirm.
    struct CPLVirtualMem *pVMemBase;
    // Reference count; CPLVirtualMemNew() initializes it to 1.
    int nRefCount;

    // Requested access mode (e.g. VIRTUALMEM_READONLY/VIRTUALMEM_READWRITE).
    CPLVirtualMemAccessMode eAccessMode;

    // Page granularity of the mapping, in bytes.
    size_t nPageSize;
    // Aligned on nPageSize.
    void *pData;
    // Returned by mmap(), potentially lower than pData.
    void *pDataToFree;
    // Requested size (unrounded).
    size_t nSize;

    // When true, no cross-thread synchronization is performed.
    bool bSingleThreadUsage;

    // Opaque pointer passed to the user callbacks.
    void *pCbkUserData;
    // Presumably invoked with pCbkUserData when the mapping is destroyed --
    // confirm in the free/destroy code further down the file.
    CPLVirtualMemFreeUserData pfnFreeUserData;
};
86 :
87 : #ifdef HAVE_VIRTUAL_MEM_VMA
88 :
89 : #include <sys/select.h> // select
90 : #include <sys/stat.h> // open()
91 : #include <sys/types.h> // open()
92 : #include <errno.h>
93 : #include <fcntl.h> // open()
94 : #include <signal.h> // sigaction
95 : #include <stdio.h>
96 : #include <stdlib.h>
97 : #include <string.h>
98 : #include <pthread.h>
99 :
100 : #ifndef HAVE_5ARGS_MREMAP
101 : #include "cpl_atomic_ops.h"
102 : #endif
103 :
104 : /* Linux specific (i.e. non POSIX compliant) features used:
105 : - returning from a SIGSEGV handler is clearly a POSIX violation, but in
106 : practice most POSIX systems should be happy.
107 : - mremap() with 5 args is Linux specific. It is used when the user
108 : callback is invited to fill a page, we currently mmap() a
writable page, let the callback fill it, and afterwards mremap() that
110 : temporary page onto the location where the fault occurred.
111 : If we have no mremap(), the workaround is to pause other threads that
112 : consume the current view while we are updating the faulted page, otherwise
113 : a non-paused thread could access a page that is in the middle of being
114 : filled... The way we pause those threads is quite original : we send them
115 : a SIGUSR1 and wait that they are stuck in the temporary SIGUSR1 handler...
116 : - MAP_ANONYMOUS isn't documented in POSIX, but very commonly found
117 : (sometimes called MAP_ANON)
118 : - dealing with the limitation of number of memory mapping regions,
119 : and the 65536 limit.
120 : - other things I've not identified
121 : */
122 :
123 : #define ALIGN_DOWN(p, pagesize) \
124 : reinterpret_cast<void *>((reinterpret_cast<GUIntptr_t>(p)) / (pagesize) * \
125 : (pagesize))
126 : #define ALIGN_UP(p, pagesize) \
127 : reinterpret_cast<void *>( \
128 : (reinterpret_cast<GUIntptr_t>(p) + (pagesize)-1) / (pagesize) * \
129 : (pagesize))
130 :
131 : #define DEFAULT_PAGE_SIZE (256 * 256)
132 : #define MAXIMUM_PAGE_SIZE (32 * 1024 * 1024)
133 :
134 : // Linux Kernel limit.
135 : #define MAXIMUM_COUNT_OF_MAPPINGS 65536
136 :
137 : #define BYEBYE_ADDR (reinterpret_cast<void *>(~static_cast<size_t>(0)))
138 :
139 : #define MAPPING_FOUND "yeah"
140 : #define MAPPING_NOT_FOUND "doh!"
141 :
142 : #define SET_BIT(ar, bitnumber) ar[(bitnumber) / 8] |= 1 << ((bitnumber) % 8)
143 : #define UNSET_BIT(ar, bitnumber) \
144 : ar[(bitnumber) / 8] &= ~(1 << ((bitnumber) % 8))
145 : #define TEST_BIT(ar, bitnumber) (ar[(bitnumber) / 8] & (1 << ((bitnumber) % 8)))
146 :
// Kind of machine instruction that triggered an access fault; used to
// decide whether a faulted page can be made writable immediately.
typedef enum
{
    OP_LOAD,
    OP_STORE,
    // x86 movs: reads through %rsi and writes through %rdi.
    OP_MOVS_RSI_RDI,
    OP_UNKNOWN
} OpType;
154 :
// Callback-serviced (on-demand) virtual memory view: an anonymous mapping
// whose pages are materialized on access faults and recycled oldest-first
// through a circular index array.
typedef struct
{
    CPLVirtualMem sBase;  // Must stay first: code casts between
                          // CPLVirtualMemVMA* and CPLVirtualMem*.

    GByte *pabitMappedPages;    // Bit per page: page currently materialized.
    GByte *pabitRWMappedPages;  // Bit per page: page mapped read-write
                                // (i.e. potentially dirty).

    int nCacheMaxSizeInPages;  // Maximum size of page array.
    int *panLRUPageIndices;    // Array with indices of cached pages.
    int iLRUStart;             // Index in array where to
                               // write next page index.
    int nLRUSize;              // Current size of the array.

    int iLastPage;  // Last page accessed.
    int nRetry;     // Number of consecutive
                    // retries to that last page.

    CPLVirtualMemCachePageCbk pfnCachePage;  // Called when a page is
                                             // mapped.
    CPLVirtualMemUnCachePageCbk pfnUnCachePage;  // Called when a (writable)
                                                 // page is unmapped.

#ifndef HAVE_5ARGS_MREMAP
    // Fallback path without 5-argument mremap(): registry of the threads
    // using this view, so they can be paused with SIGUSR1 while a faulted
    // page is updated in place (see CPLVirtualMemAddPage).
    CPLMutex *hMutexThreadArray;
    int nThreads;
    pthread_t *pahThreads;
#endif
} CPLVirtualMemVMA;
183 :
// Process-wide singleton coordinating all registered CPLVirtualMemVMA
// views and the helper thread that services access faults.
typedef struct
{
    // hVirtualMemManagerMutex protects the 2 following variables.
    CPLVirtualMemVMA **pasVirtualMem;
    int nVirtualMemCount;

    // Pipes used to exchange messages with the helper thread.
    // NOTE(review): the exact protocol is implemented further down in the
    // file (not visible in this chunk) -- confirm there.
    int pipefd_to_thread[2];
    int pipefd_from_thread[2];
    int pipefd_wait_thread[2];
    CPLJoinableThread *hHelperThread;

    // Using sigaction without testing HAVE_SIGACTION since we are in a Linux
    // specific code path
    // Previous signal disposition, saved so it can be restored on cleanup --
    // presumably the SIGSEGV handler; confirm in CPLVirtualMemManagerInit().
    struct sigaction oldact;
} CPLVirtualMemManager;
199 :
// Fault descriptor handed to the worker/helper thread -- presumably via the
// pipefd_* channels of CPLVirtualMemManager; confirm with the helper-thread
// code further down the file.
typedef struct
{
    void *pFaultAddr;            // Address whose access faulted.
    OpType opType;               // Load/store classification of the
                                 // faulting instruction.
    pthread_t hRequesterThread;  // Thread that triggered the fault.
} CPLVirtualMemMsgToWorkerThread;
206 :
207 : // TODO: Singletons.
208 : static CPLVirtualMemManager *pVirtualMemManager = nullptr;
209 : static CPLMutex *hVirtualMemManagerMutex = nullptr;
210 :
211 : static bool CPLVirtualMemManagerInit();
212 :
213 : #ifdef DEBUG_VIRTUALMEM
214 :
/************************************************************************/
/*                          fprintfstderr()                             */
/************************************************************************/

// This function may be called from signal handlers where most functions
// from the C library are unsafe to be called. fprintf() is clearly one
// of those functions (see
// http://stackoverflow.com/questions/4554129/linux-glibc-can-i-use-fprintf-in-signal-handler)
// vsnprintf() is *probably* safer with respect to that (but there is no
// guarantee though).
// write() is async-signal-safe.
//
// Formats fmt into a fixed 80-byte buffer (truncating if needed) and writes
// the result to stderr (fd 2), retrying on EINTR and partial writes.
static void fprintfstderr(const char *fmt, ...)
{
    char buffer[80] = {};
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buffer, sizeof(buffer), fmt, ap);
    va_end(ap);
    int offset = 0;
    while (true)
    {
        const size_t nSizeToWrite = strlen(buffer + offset);
        int ret = static_cast<int>(write(2, buffer + offset, nSizeToWrite));
        if (ret < 0)
        {
            // Bug fix: a failing write() other than EINTR previously fell
            // into the partial-write branch and did "offset += -1", moving
            // the cursor before the buffer and spinning forever. Retry only
            // on EINTR; give up on any other error.
            if (errno != EINTR)
                break;
        }
        else
        {
            if (ret == static_cast<int>(nSizeToWrite))
                break;
            // Partial write: advance past the bytes already written.
            offset += ret;
        }
    }
}
249 :
250 : #endif
251 :
252 : /************************************************************************/
253 : /* CPLVirtualMemManagerRegisterVirtualMem() */
254 : /************************************************************************/
255 :
256 17 : static bool CPLVirtualMemManagerRegisterVirtualMem(CPLVirtualMemVMA *ctxt)
257 : {
258 17 : if (!CPLVirtualMemManagerInit())
259 0 : return false;
260 :
261 17 : bool bSuccess = true;
262 17 : IGNORE_OR_ASSERT_IN_DEBUG(ctxt);
263 17 : CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
264 : CPLVirtualMemVMA **pasVirtualMemNew = static_cast<CPLVirtualMemVMA **>(
265 17 : VSI_REALLOC_VERBOSE(pVirtualMemManager->pasVirtualMem,
266 : sizeof(CPLVirtualMemVMA *) *
267 : (pVirtualMemManager->nVirtualMemCount + 1)));
268 17 : if (pasVirtualMemNew == nullptr)
269 : {
270 0 : bSuccess = false;
271 : }
272 : else
273 : {
274 17 : pVirtualMemManager->pasVirtualMem = pasVirtualMemNew;
275 : pVirtualMemManager
276 17 : ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount] = ctxt;
277 17 : pVirtualMemManager->nVirtualMemCount++;
278 : }
279 17 : CPLReleaseMutex(hVirtualMemManagerMutex);
280 17 : return bSuccess;
281 : }
282 :
283 : /************************************************************************/
284 : /* CPLVirtualMemManagerUnregisterVirtualMem() */
285 : /************************************************************************/
286 :
287 17 : static void CPLVirtualMemManagerUnregisterVirtualMem(CPLVirtualMemVMA *ctxt)
288 : {
289 17 : CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
290 23 : for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
291 : {
292 23 : if (pVirtualMemManager->pasVirtualMem[i] == ctxt)
293 : {
294 17 : if (i < pVirtualMemManager->nVirtualMemCount - 1)
295 : {
296 9 : memmove(pVirtualMemManager->pasVirtualMem + i,
297 9 : pVirtualMemManager->pasVirtualMem + i + 1,
298 : sizeof(CPLVirtualMem *) *
299 9 : (pVirtualMemManager->nVirtualMemCount - i - 1));
300 : }
301 17 : pVirtualMemManager->nVirtualMemCount--;
302 17 : break;
303 : }
304 : }
305 17 : CPLReleaseMutex(hVirtualMemManagerMutex);
306 17 : }
307 :
/************************************************************************/
/*                          CPLVirtualMemNew()                          */
/************************************************************************/

static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt);

// Creates a callback-serviced virtual memory view of nSize bytes, keeping
// at most ~nCacheSize bytes resident at once.
//
// nSize              - requested size in bytes (must be > 0).
// nCacheSize         - maximum amount of memory kept cached; clamped to
//                      [1 byte, nSize].
// nPageSizeHint      - desired page granularity; rounded to a multiple of
//                      the system page size (or next power of two).
// bSingleThreadUsage - if true, no cross-thread synchronization is set up.
// eAccessMode        - read-only / read-write mode of the view.
// pfnCachePage       - mandatory callback producing a page's content.
// pfnUnCachePage     - optional callback invoked before a read-write page
//                      is discarded.
// pfnFreeUserData    - optional destructor for pCbkUserData.
// pCbkUserData       - opaque pointer passed to the callbacks.
//
// Returns the new mapping, or nullptr on failure.
CPLVirtualMem *CPLVirtualMemNew(size_t nSize, size_t nCacheSize,
                                size_t nPageSizeHint, int bSingleThreadUsage,
                                CPLVirtualMemAccessMode eAccessMode,
                                CPLVirtualMemCachePageCbk pfnCachePage,
                                CPLVirtualMemUnCachePageCbk pfnUnCachePage,
                                CPLVirtualMemFreeUserData pfnFreeUserData,
                                void *pCbkUserData)
{
    size_t nMinPageSize = CPLGetPageSize();
    size_t nPageSize = DEFAULT_PAGE_SIZE;

    IGNORE_OR_ASSERT_IN_DEBUG(nSize > 0);
    IGNORE_OR_ASSERT_IN_DEBUG(pfnCachePage != nullptr);

    // Honor the page size hint when it is a multiple of the system page
    // size; otherwise round it up to the enclosing power of two.
    if (nPageSizeHint >= nMinPageSize && nPageSizeHint <= MAXIMUM_PAGE_SIZE)
    {
        if ((nPageSizeHint % nMinPageSize) == 0)
            nPageSize = nPageSizeHint;
        else
        {
            int nbits = 0;
            nPageSize = static_cast<size_t>(nPageSizeHint);
            do
            {
                nPageSize >>= 1;
                nbits++;
            } while (nPageSize > 0);
            // 1 << (nbits - 1) is the highest set bit of the hint; double
            // it if the hint was not already a power of two.
            nPageSize = static_cast<size_t>(1) << (nbits - 1);
            if (nPageSize < static_cast<size_t>(nPageSizeHint))
                nPageSize <<= 1;
        }
    }

    if ((nPageSize % nMinPageSize) != 0)
        nPageSize = nMinPageSize;

    // Clamp the cache size to [1 byte, nSize].
    if (nCacheSize > nSize)
        nCacheSize = nSize;
    else if (nCacheSize == 0)
        nCacheSize = 1;

    int nMappings = 0;

    // Linux specific:
    // Count the number of existing memory mappings.
    // NOTE(review): lines longer than 79 chars are counted several times by
    // this fgets() loop, which over-estimates nMappings -- conservative.
    FILE *f = fopen("/proc/self/maps", "rb");
    if (f != nullptr)
    {
        char buffer[80] = {};
        while (fgets(buffer, sizeof(buffer), f) != nullptr)
            nMappings++;
        fclose(f);
    }

    // Grow the page size until the worst-case number of cached pages stays
    // within ~90% of the kernel's mapping-count limit.
    size_t nCacheMaxSizeInPages = 0;
    while (true)
    {
        // /proc/self/maps must not have more than 65K lines.
        nCacheMaxSizeInPages = (nCacheSize + 2 * nPageSize - 1) / nPageSize;
        if (nCacheMaxSizeInPages >
            static_cast<size_t>((MAXIMUM_COUNT_OF_MAPPINGS * 9 / 10) -
                                nMappings))
            nPageSize <<= 1;
        else
            break;
    }
    // Reserve the whole address range as inaccessible (PROT_NONE); pages are
    // made real on first access. The extra "+ 2 * nPageSize" slack matches
    // the 2-page requirement for rep movs noted below.
    size_t nRoundedMappingSize =
        ((nSize + 2 * nPageSize - 1) / nPageSize) * nPageSize;
    void *pData = mmap(nullptr, nRoundedMappingSize, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pData == MAP_FAILED)
    {
        perror("mmap");
        return nullptr;
    }
    CPLVirtualMemVMA *ctxt = static_cast<CPLVirtualMemVMA *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMemVMA)));
    if (ctxt == nullptr)
    {
        munmap(pData, nRoundedMappingSize);
        return nullptr;
    }
    ctxt->sBase.nRefCount = 1;
    ctxt->sBase.eType = VIRTUAL_MEM_TYPE_VMA;
    ctxt->sBase.eAccessMode = eAccessMode;
    ctxt->sBase.pDataToFree = pData;
    ctxt->sBase.pData = ALIGN_UP(pData, nPageSize);
    ctxt->sBase.nPageSize = nPageSize;
    ctxt->sBase.nSize = nSize;
    ctxt->sBase.bSingleThreadUsage = CPL_TO_BOOL(bSingleThreadUsage);
    ctxt->sBase.pfnFreeUserData = pfnFreeUserData;
    ctxt->sBase.pCbkUserData = pCbkUserData;

    // One bit per page: which pages are currently materialized.
    ctxt->pabitMappedPages = static_cast<GByte *>(
        VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
    if (ctxt->pabitMappedPages == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    // One bit per page: which pages are mapped read-write (dirty candidates).
    ctxt->pabitRWMappedPages = static_cast<GByte *>(
        VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
    if (ctxt->pabitRWMappedPages == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    // Need at least 2 pages in case for a rep movs instruction
    // that operate in the view.
    ctxt->nCacheMaxSizeInPages = static_cast<int>(nCacheMaxSizeInPages);
    ctxt->panLRUPageIndices = static_cast<int *>(
        VSI_MALLOC_VERBOSE(ctxt->nCacheMaxSizeInPages * sizeof(int)));
    if (ctxt->panLRUPageIndices == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    ctxt->iLRUStart = 0;
    ctxt->nLRUSize = 0;
    ctxt->iLastPage = -1;
    ctxt->nRetry = 0;
    ctxt->pfnCachePage = pfnCachePage;
    ctxt->pfnUnCachePage = pfnUnCachePage;

#ifndef HAVE_5ARGS_MREMAP
    // Without 5-arg mremap(), multi-threaded use needs the thread registry
    // (paused via SIGUSR1 while a page is updated in place).
    if (!ctxt->sBase.bSingleThreadUsage)
    {
        ctxt->hMutexThreadArray = CPLCreateMutex();
        IGNORE_OR_ASSERT_IN_DEBUG(ctxt->hMutexThreadArray != nullptr);
        CPLReleaseMutex(ctxt->hMutexThreadArray);
        ctxt->nThreads = 0;
        ctxt->pahThreads = nullptr;
    }
#endif

    if (!CPLVirtualMemManagerRegisterVirtualMem(ctxt))
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }

    return reinterpret_cast<CPLVirtualMem *>(ctxt);
}
461 :
462 : /************************************************************************/
463 : /* CPLVirtualMemFreeFileMemoryMapped() */
464 : /************************************************************************/
465 :
466 17 : static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt)
467 : {
468 17 : CPLVirtualMemManagerUnregisterVirtualMem(ctxt);
469 :
470 17 : size_t nRoundedMappingSize =
471 17 : ((ctxt->sBase.nSize + 2 * ctxt->sBase.nPageSize - 1) /
472 17 : ctxt->sBase.nPageSize) *
473 17 : ctxt->sBase.nPageSize;
474 17 : if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
475 7 : ctxt->pabitRWMappedPages != nullptr && ctxt->pfnUnCachePage != nullptr)
476 : {
477 27 : for (size_t i = 0; i < nRoundedMappingSize / ctxt->sBase.nPageSize; i++)
478 : {
479 20 : if (TEST_BIT(ctxt->pabitRWMappedPages, i))
480 : {
481 13 : void *addr = static_cast<char *>(ctxt->sBase.pData) +
482 13 : i * ctxt->sBase.nPageSize;
483 13 : ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
484 13 : i * ctxt->sBase.nPageSize, addr,
485 : ctxt->sBase.nPageSize,
486 : ctxt->sBase.pCbkUserData);
487 : }
488 : }
489 : }
490 17 : int nRet = munmap(ctxt->sBase.pDataToFree, nRoundedMappingSize);
491 17 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
492 17 : CPLFree(ctxt->pabitMappedPages);
493 17 : CPLFree(ctxt->pabitRWMappedPages);
494 17 : CPLFree(ctxt->panLRUPageIndices);
495 : #ifndef HAVE_5ARGS_MREMAP
496 : if (!ctxt->sBase.bSingleThreadUsage)
497 : {
498 : CPLFree(ctxt->pahThreads);
499 : CPLDestroyMutex(ctxt->hMutexThreadArray);
500 : }
501 : #endif
502 17 : }
503 :
#ifndef HAVE_5ARGS_MREMAP

// Number of threads currently parked inside CPLVirtualMemSIGUSR1Handler.
static volatile int nCountThreadsInSigUSR1 = 0;
// Non-zero while a faulting thread is updating a page; parked threads spin
// until it drops back to 0.
static volatile int nWaitHelperThread = 0;

/************************************************************************/
/*                      CPLVirtualMemSIGUSR1Handler()                   */
/************************************************************************/

// Temporary SIGUSR1 handler installed by CPLVirtualMemAddPage() (no-mremap
// code path) to park the other threads sharing a view while the faulted
// page is updated in place.
static void CPLVirtualMemSIGUSR1Handler(int /* signum_unused */,
                                        siginfo_t * /* the_info_unused */,
                                        void * /* the_ctxt_unused */)
{
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("entering CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
    // Rouault guesses this is only POSIX correct if it is implemented by an
    // intrinsic.
    CPLAtomicInc(&nCountThreadsInSigUSR1);
    while (nWaitHelperThread)
        // Not explicitly indicated as signal-async-safe, but hopefully ok.
        usleep(1);
    CPLAtomicDec(&nCountThreadsInSigUSR1);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("leaving CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
}
#endif
532 :
533 : /************************************************************************/
534 : /* CPLVirtualMemDeclareThread() */
535 : /************************************************************************/
536 :
537 2 : void CPLVirtualMemDeclareThread(CPLVirtualMem *ctxt)
538 : {
539 2 : if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
540 0 : return;
541 : #ifndef HAVE_5ARGS_MREMAP
542 : CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
543 : IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
544 : CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
545 : ctxtVMA->pahThreads = static_cast<pthread_t *>(CPLRealloc(
546 : ctxtVMA->pahThreads, (ctxtVMA->nThreads + 1) * sizeof(pthread_t)));
547 : ctxtVMA->pahThreads[ctxtVMA->nThreads] = pthread_self();
548 : ctxtVMA->nThreads++;
549 :
550 : CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
551 : #endif
552 : }
553 :
554 : /************************************************************************/
555 : /* CPLVirtualMemUnDeclareThread() */
556 : /************************************************************************/
557 :
558 2 : void CPLVirtualMemUnDeclareThread(CPLVirtualMem *ctxt)
559 : {
560 2 : if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
561 0 : return;
562 : #ifndef HAVE_5ARGS_MREMAP
563 : CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
564 : pthread_t self = pthread_self();
565 : IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
566 : CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
567 : for (int i = 0; i < ctxtVMA->nThreads; i++)
568 : {
569 : if (ctxtVMA->pahThreads[i] == self)
570 : {
571 : if (i < ctxtVMA->nThreads - 1)
572 : memmove(ctxtVMA->pahThreads + i + 1, ctxtVMA->pahThreads + i,
573 : (ctxtVMA->nThreads - 1 - i) * sizeof(pthread_t));
574 : ctxtVMA->nThreads--;
575 : break;
576 : }
577 : }
578 :
579 : CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
580 : #endif
581 : }
582 :
/************************************************************************/
/*                     CPLVirtualMemGetPageToFill()                     */
/************************************************************************/

// Must be paired with CPLVirtualMemAddPage.
//
// Returns a writable page that the cache callback can fill for the page
// starting at start_page_addr. Depending on the configuration this is
// either the target page itself (made writable) or a temporary anonymous
// page later remapped/copied by CPLVirtualMemAddPage().
// In the !HAVE_5ARGS_MREMAP multi-threaded case, this acquires
// hMutexThreadArray and returns with it STILL HELD; CPLVirtualMemAddPage()
// releases it.
static void *CPLVirtualMemGetPageToFill(CPLVirtualMemVMA *ctxt,
                                        void *start_page_addr)
{
    void *pPageToFill = nullptr;

    if (ctxt->sBase.bSingleThreadUsage)
    {
        // Single-threaded: safe to fill the target page in place.
        pPageToFill = start_page_addr;
        const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
                                  PROT_READ | PROT_WRITE);
        IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    }
    else
    {
#ifndef HAVE_5ARGS_MREMAP
        // Mutex intentionally kept held until CPLVirtualMemAddPage().
        CPLAcquireMutex(ctxt->hMutexThreadArray, 1000.0);
        if (ctxt->nThreads == 1)
        {
            // Only one registered consumer: in-place fill is safe here too.
            pPageToFill = start_page_addr;
            const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
                                      PROT_READ | PROT_WRITE);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
        else
#endif
        {
            // Allocate a temporary writable page that the user
            // callback can fill.
            pPageToFill =
                mmap(nullptr, ctxt->sBase.nPageSize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            IGNORE_OR_ASSERT_IN_DEBUG(pPageToFill != MAP_FAILED);
        }
    }
    return pPageToFill;
}
624 :
/************************************************************************/
/*                        CPLVirtualMemAddPage()                        */
/************************************************************************/

// Publishes the page filled by the user callback (pPageToFill) at its
// final location target_addr, evicting the oldest cached page first when
// the cache is full. Counterpart of CPLVirtualMemGetPageToFill(); in the
// !HAVE_5ARGS_MREMAP multi-threaded case this also releases the
// hMutexThreadArray acquired there.
static void CPLVirtualMemAddPage(CPLVirtualMemVMA *ctxt, void *target_addr,
                                 void *pPageToFill, OpType opType,
                                 pthread_t hRequesterThread)
{
    const int iPage =
        static_cast<int>((static_cast<char *>(target_addr) -
                          static_cast<char *>(ctxt->sBase.pData)) /
                         ctxt->sBase.nPageSize);
    // Cache full: evict the page written longest ago (the index array is
    // used as a circular FIFO).
    if (ctxt->nLRUSize == ctxt->nCacheMaxSizeInPages)
    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
        fprintfstderr("uncaching page %d\n", iPage);
#endif
        int nOldPage = ctxt->panLRUPageIndices[ctxt->iLRUStart];
        void *addr = static_cast<char *>(ctxt->sBase.pData) +
                     nOldPage * ctxt->sBase.nPageSize;
        // Flush a dirty page through the uncache callback before dropping
        // it (read-write mode only).
        if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
            ctxt->pfnUnCachePage != nullptr &&
            TEST_BIT(ctxt->pabitRWMappedPages, nOldPage))
        {
            // Clamp so bytes past the requested size are not reported.
            size_t nToBeEvicted = ctxt->sBase.nPageSize;
            if (static_cast<char *>(addr) + nToBeEvicted >=
                static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
                nToBeEvicted = static_cast<char *>(ctxt->sBase.pData) +
                               ctxt->sBase.nSize - static_cast<char *>(addr);

            ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                 nOldPage * ctxt->sBase.nPageSize, addr,
                                 nToBeEvicted, ctxt->sBase.pCbkUserData);
        }
        // "Free" the least recently used page.
        UNSET_BIT(ctxt->pabitMappedPages, nOldPage);
        UNSET_BIT(ctxt->pabitRWMappedPages, nOldPage);
        // Free the old page.
        // Not sure how portable it is to do that that way.
        // (MAP_FIXED over the old page returns it to the inaccessible
        // PROT_NONE state without creating an extra mapping.)
        const void *const pRet =
            mmap(addr, ctxt->sBase.nPageSize, PROT_NONE,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == addr);
        // cppcheck-suppress memleak
    }
    // Record the new page in the circular index array.
    ctxt->panLRUPageIndices[ctxt->iLRUStart] = iPage;
    ctxt->iLRUStart = (ctxt->iLRUStart + 1) % ctxt->nCacheMaxSizeInPages;
    if (ctxt->nLRUSize < ctxt->nCacheMaxSizeInPages)
    {
        ctxt->nLRUSize++;
    }
    SET_BIT(ctxt->pabitMappedPages, iPage);

    if (ctxt->sBase.bSingleThreadUsage)
    {
        // Single-threaded: the page was filled in place; just fix up the
        // final protection.
        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            const int nRet =
                mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
    }
    else
    {
#ifdef HAVE_5ARGS_MREMAP
        (void)hRequesterThread;

        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            // Turn the temporary page read-only before remapping it.
            // Only turn it writable when a new fault occurs (and the
            // mapping is writable).
            const int nRet =
                mprotect(pPageToFill, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
        /* Can now remap the pPageToFill onto the target page */
        // Atomic publication: other threads either fault (page still
        // PROT_NONE) or see the fully filled page.
        const void *const pRet =
            mremap(pPageToFill, ctxt->sBase.nPageSize, ctxt->sBase.nPageSize,
                   MREMAP_MAYMOVE | MREMAP_FIXED, target_addr);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == target_addr);

#else
        // No 5-arg mremap(): must memcpy() into the target page, which is
        // only safe while every other consumer thread is paused.
        if (ctxt->nThreads > 1)
        {
            /* Pause threads that share this mem view */
            CPLAtomicInc(&nWaitHelperThread);

            /* Install temporary SIGUSR1 signal handler */
            struct sigaction act, oldact;
            act.sa_sigaction = CPLVirtualMemSIGUSR1Handler;
            sigemptyset(&act.sa_mask);
            /* We don't want the sigsegv handler to be called when we are */
            /* running the sigusr1 handler */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaddset(&act.sa_mask, SIGSEGV) == 0);
            act.sa_flags = 0;
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &act, &oldact) == 0);

            for (int i = 0; i < ctxt->nThreads; i++)
            {
                if (ctxt->pahThreads[i] != hRequesterThread)
                {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("stopping thread %X\n", ctxt->pahThreads[i]);
#endif
                    IGNORE_OR_ASSERT_IN_DEBUG(
                        pthread_kill(ctxt->pahThreads[i], SIGUSR1) == 0);
                }
            }

            /* Wait that they are all paused */
            while (nCountThreadsInSigUSR1 != ctxt->nThreads - 1)
                usleep(1);

            /* Restore old SIGUSR1 signal handler */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &oldact, nullptr) ==
                                      0);

            int nRet = mprotect(target_addr, ctxt->sBase.nPageSize,
                                PROT_READ | PROT_WRITE);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("memcpying page %d\n", iPage);
#endif
            memcpy(target_addr, pPageToFill, ctxt->sBase.nPageSize);

            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else
            {
                nRet = mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
            }

            /* Wake up sleeping threads */
            CPLAtomicDec(&nWaitHelperThread);
            while (nCountThreadsInSigUSR1 != 0)
                usleep(1);

            IGNORE_OR_ASSERT_IN_DEBUG(
                munmap(pPageToFill, ctxt->sBase.nPageSize) == 0);
        }
        else
        {
            // Single consumer thread: the page was filled in place by
            // CPLVirtualMemGetPageToFill(); just fix up protections.
            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
            {
                const int nRet2 =
                    mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet2 == 0);
            }
        }

        // Releases the mutex acquired in CPLVirtualMemGetPageToFill().
        CPLReleaseMutex(ctxt->hMutexThreadArray);
#endif
    }
    // cppcheck-suppress memleak
}
808 :
809 : /************************************************************************/
810 : /* CPLVirtualMemGetOpTypeImm() */
811 : /************************************************************************/
812 :
813 : #if defined(__x86_64__) || defined(__i386__)
814 0 : static OpType CPLVirtualMemGetOpTypeImm(GByte val_rip)
815 : {
816 0 : OpType opType = OP_UNKNOWN;
817 0 : if ((/*val_rip >= 0x00 &&*/ val_rip <= 0x07) ||
818 0 : (val_rip >= 0x40 && val_rip <= 0x47)) // add $, (X)
819 0 : opType = OP_STORE;
820 0 : if ((val_rip >= 0x08 && val_rip <= 0x0f) ||
821 0 : (val_rip >= 0x48 && val_rip <= 0x4f)) // or $, (X)
822 0 : opType = OP_STORE;
823 0 : if ((val_rip >= 0x20 && val_rip <= 0x27) ||
824 0 : (val_rip >= 0x60 && val_rip <= 0x67)) // and $, (X)
825 0 : opType = OP_STORE;
826 0 : if ((val_rip >= 0x28 && val_rip <= 0x2f) ||
827 0 : (val_rip >= 0x68 && val_rip <= 0x6f)) // sub $, (X)
828 0 : opType = OP_STORE;
829 0 : if ((val_rip >= 0x30 && val_rip <= 0x37) ||
830 0 : (val_rip >= 0x70 && val_rip <= 0x77)) // xor $, (X)
831 0 : opType = OP_STORE;
832 0 : if ((val_rip >= 0x38 && val_rip <= 0x3f) ||
833 0 : (val_rip >= 0x78 && val_rip <= 0x7f)) // cmp $, (X)
834 0 : opType = OP_LOAD;
835 0 : return opType;
836 : }
837 : #endif
838 :
839 : /************************************************************************/
840 : /* CPLVirtualMemGetOpType() */
841 : /************************************************************************/
842 :
843 : // Don't need exhaustivity. It is just a hint for an optimization:
844 : // If the fault occurs on a store operation, then we can directly put
845 : // the page in writable mode if the mapping allows it.
846 :
847 : #if defined(__x86_64__) || defined(__i386__)
// Classify the x86 / x86-64 instruction at rip (the instruction that raised
// the SIGSEGV) as a memory read (OP_LOAD), a memory write (OP_STORE), a
// string-move through %rsi/%rdi (OP_MOVS_RSI_RDI, for which the faulting
// address must be compared against both registers by the caller), or
// OP_UNKNOWN when the opcode is not in this decode table.
// Only a partial decode is done: prefixes 0x40-0x4f (REX), 0x66 (operand
// size) and 0xf2/0xf3 (SSE / rep) are handled one level deep, looking at
// rip[1] (and rip[2]/rip[3] for multi-byte forms). Immediate-operand group
// opcodes (0x80/0x81/0x83) are delegated to CPLVirtualMemGetOpTypeImm(),
// which inspects the ModR/M reg field to tell cmp/test (loads) from the
// read-modify-write forms (stores).
// On non-x86 architectures this always returns OP_UNKNOWN.
static OpType CPLVirtualMemGetOpType(const GByte *rip)
{
    OpType opType = OP_UNKNOWN;

#if defined(__x86_64__) || defined(__i386__)
    switch (rip[0])
    {
        case 0x00: /* add %al,(%rax) */
        case 0x01: /* add %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x02: /* add (%rax),%al */
        case 0x03: /* add (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x08: /* or %al,(%rax) */
        case 0x09: /* or %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x0a: /* or (%rax),%al */
        case 0x0b: /* or (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x0f: /* two-byte opcode escape */
        {
            switch (rip[1])
            {
                case 0xb6: /* movzbl (%rax),%eax */
                case 0xb7: /* movzwl (%rax),%eax */
                case 0xbe: /* movsbl (%rax),%eax */
                case 0xbf: /* movswl (%rax),%eax */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
        case 0xc6: /* movb $,(%rax) */
        case 0xc7: /* movl $,(%rax) */
            opType = OP_STORE;
            break;

        case 0x20: /* and %al,(%rax) */
        case 0x21: /* and %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x22: /* and (%rax),%al */
        case 0x23: /* and (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x28: /* sub %al,(%rax) */
        case 0x29: /* sub %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x2a: /* sub (%rax),%al */
        case 0x2b: /* sub (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x30: /* xor %al,(%rax) */
        case 0x31: /* xor %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x32: /* xor (%rax),%al */
        case 0x33: /* xor (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x38: /* cmp %al,(%rax) */
        case 0x39: /* cmp %eax,(%rax) */
            opType = OP_LOAD;
            break;
        case 0x40: /* bare REX prefix: byte regs %spl etc. */
        {
            switch (rip[1])
            {
                case 0x00: /* add %spl,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x02: /* add (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x28: /* sub %spl,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x2a: /* sub (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x3a: /* cmp (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x8a: /* mov (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
#if defined(__x86_64__)
        case 0x41: /* reg=%al/%eax, X=%r8 */
        case 0x42: /* reg=%al/%eax, X=%rax,%r8,1 */
        case 0x43: /* reg=%al/%eax, X=%r8,%r8,1 */
        case 0x44: /* reg=%r8b/%r8w, X = %rax */
        case 0x45: /* reg=%r8b/%r8w, X = %r8 */
        case 0x46: /* reg=%r8b/%r8w, X = %rax,%r8,1 */
        case 0x47: /* reg=%r8b/%r8w, X = %r8,%r8,1 */
        {
            switch (rip[1])
            {
                case 0x00: /* add regb,(X) */
                case 0x01: /* add regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x02: /* add (X),regb */
                case 0x03: /* add (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0xb6: /* movzbl (X),regl */
                        case 0xb7: /* movzwl (X),regl */
                        case 0xbe: /* movsbl (X),regl */
                        case 0xbf: /* movswl (X),regl */
                            opType = OP_LOAD;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x28: /* sub regb,(X) */
                case 0x29: /* sub regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x2a: /* sub (X),regb */
                case 0x2b: /* sub (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0x38: /* cmp regb,(X) */
                case 0x39: /* cmp regl,(X) */
                    opType = OP_LOAD;
                    break;
                case 0x80: /* cmpb,... $,(X) */
                case 0x81: /* cmpl,... $,(X) */
                case 0x83: /* cmpl,... $,(X) */
                    /* Immediate group: ModR/M reg field decides load/store. */
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;
                case 0x88: /* mov regb,(X) */
                case 0x89: /* mov regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x8a: /* mov (X),regb */
                case 0x8b: /* mov (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0xc6: /* movb $,(X) */
                case 0xc7: /* movl $,(X) */
                    opType = OP_STORE;
                    break;
                case 0x84: /* test %al,(X) */
                    opType = OP_LOAD;
                    break;
                case 0xf6: /* testb $,(X) or notb (X) */
                case 0xf7: /* testl $,(X) or notl (X)*/
                {
                    if (rip[2] < 0x10) /* test (X) */
                        opType = OP_LOAD;
                    else /* not (X) */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
        case 0x48: /* reg=%rax, X=%rax or %rax,%rax,1 */
        case 0x49: /* reg=%rax, X=%r8 or %r8,%rax,1 */
        case 0x4a: /* reg=%rax, X=%rax,%r8,1 */
        case 0x4b: /* reg=%rax, X=%r8,%r8,1 */
        case 0x4c: /* reg=%r8, X=%rax or %rax,%rax,1 */
        case 0x4d: /* reg=%r8, X=%r8 or %r8,%rax,1 */
        case 0x4e: /* reg=%r8, X=%rax,%r8,1 */
        case 0x4f: /* reg=%r8, X=%r8,%r8,1 */
        {
            /* REX.W prefixed: 64-bit operand forms. */
            switch (rip[1])
            {
                case 0x01: /* add reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x03: /* add (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x09: /* or reg,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x0b: /* or (%rax),reg */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0xc3: /* movnti reg,(X) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x21: /* and reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x23: /* and (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x29: /* sub reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x2b: /* sub (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x31: /* xor reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x33: /* xor (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x39: /* cmp reg,(X) */
                    opType = OP_LOAD;
                    break;

                case 0x81:
                case 0x83:
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;

                case 0x85: /* test reg,(X) */
                    opType = OP_LOAD;
                    break;

                case 0x89: /* mov reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x8b: /* mov (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0xc7: /* movq $,(X) */
                    opType = OP_STORE;
                    break;

                case 0xf7:
                {
                    if (rip[2] < 0x10) /* testq $,(X) */
                        opType = OP_LOAD;
                    else /* notq (X) */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
#endif
        case 0x66: /* operand-size (16-bit / SSE) prefix */
        {
            switch (rip[1])
            {
                case 0x01: /* add %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x03: /* add (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0x2e: /* ucomisd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x6f: /* movdqa (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x7f: /* movdqa %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0xb6: /* movzbw (%rax),%ax */
                            opType = OP_LOAD;
                            break;
                        case 0xe7: /* movntdq %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x29: /* sub %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x2b: /* sub (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0x39: /* cmp %ax,(%rax) */
                    opType = OP_LOAD;
                    break;
#if defined(__x86_64__)
                case 0x41: /* reg = %ax (or %xmm0), X = %r8 */
                case 0x42: /* reg = %ax (or %xmm0), X = %rax,%r8,1 */
                case 0x43: /* reg = %ax (or %xmm0), X = %r8,%r8,1 */
                case 0x44: /* reg = %r8w (or %xmm8), X = %rax */
                case 0x45: /* reg = %r8w (or %xmm8), X = %r8 */
                case 0x46: /* reg = %r8w (or %xmm8), X = %rax,%r8,1 */
                case 0x47: /* reg = %r8w (or %xmm8), X = %r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x01: /* add reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x03: /* add (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0x0f:
                        {
                            switch (rip[3])
                            {
                                case 0x2e: /* ucomisd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x6f: /* movdqa (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x7f: /* movdqa reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0xb6: /* movzbw (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0xe7: /* movntdq reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        case 0x29: /* sub reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x2b: /* sub (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0x39: /* cmp reg,(X) */
                            opType = OP_LOAD;
                            break;
                        case 0x81: /* cmpw,... $,(X) */
                        case 0x83: /* cmpw,... $,(X) */
                            opType = CPLVirtualMemGetOpTypeImm(rip[3]);
                            break;
                        case 0x85: /* test reg,(X) */
                            opType = OP_LOAD;
                            break;
                        case 0x89: /* mov reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x8b: /* mov (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0xc7: /* movw $,(X) */
                            opType = OP_STORE;
                            break;
                        case 0xf7:
                        {
                            if (rip[3] < 0x10) /* testw $,(X) */
                                opType = OP_LOAD;
                            else /* notw (X) */
                                opType = OP_STORE;
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
#endif
                case 0x81: /* cmpw,... $,(%rax) */
                case 0x83: /* cmpw,... $,(%rax) */
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;

                case 0x85: /* test %ax,(%rax) */
                    opType = OP_LOAD;
                    break;
                case 0x89: /* mov %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x8b: /* mov (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0xc7: /* movw $,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0xf3:
                {
                    switch (rip[2])
                    {
                        case 0xa5: /* rep movsw %ds:(%rsi),%es:(%rdi) */
                            opType = OP_MOVS_RSI_RDI;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0xf7: /* testw $,(%rax) or notw (%rax) */
                {
                    if (rip[2] < 0x10) /* test */
                        opType = OP_LOAD;
                    else /* not */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
        case 0x80: /* cmpb,... $,(%rax) */
        case 0x81: /* cmpl,... $,(%rax) */
        case 0x83: /* cmpl,... $,(%rax) */
            opType = CPLVirtualMemGetOpTypeImm(rip[1]);
            break;
        case 0x84: /* test %al,(%rax) */
        case 0x85: /* test %eax,(%rax) */
            opType = OP_LOAD;
            break;
        case 0x88: /* mov %al,(%rax) */
            opType = OP_STORE;
            break;
        case 0x89: /* mov %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x8a: /* mov (%rax),%al */
            opType = OP_LOAD;
            break;
        case 0x8b: /* mov (%rax),%eax */
            opType = OP_LOAD;
            break;
        case 0xd9: /* 387 float */
        {
            if (rip[1] < 0x08) /* flds (%eax) */
                opType = OP_LOAD;
            // NOTE(review): the upper bound 0x20 looks like an off-by-one;
            // the ModR/M range for fstp (0xd9 /3) is 0x18-0x1f, and
            // 0xd9 0x20 would be fldenv (a load). Confirm against the
            // Intel SDM before changing.
            else if (rip[1] >= 0x18 && rip[1] <= 0x20) /* fstps (%eax) */
                opType = OP_STORE;
            break;
        }
        case 0xf2: /* SSE 2 */
        {
            switch (rip[1])
            {
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0x10: /* movsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x11: /* movsd %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0x58: /* addsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x59: /* mulsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x5c: /* subsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x5e: /* divsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#if defined(__x86_64__)
                case 0x41: /* reg=%xmm0, X=%r8 or %r8,%rax,1 */
                case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
                case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
                case 0x44: /* reg=%xmm8, X=%rax or %rax,%rax,1*/
                case 0x45: /* reg=%xmm8, X=%r8 or %r8,%rax,1 */
                case 0x46: /* reg=%xmm8, X=%rax,%r8,1 */
                case 0x47: /* reg=%xmm8, X=%r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x0f:
                        {
                            switch (rip[3])
                            {
                                case 0x10: /* movsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x11: /* movsd reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0x58: /* addsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x59: /* mulsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x5c: /* subsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x5e: /* divsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
#endif
                default:
                    break;
            }
            break;
        }
        case 0xf3: /* rep / SSE scalar-single prefix */
        {
            switch (rip[1])
            {
                case 0x0f: /* SSE 2 */
                {
                    switch (rip[2])
                    {
                        case 0x10: /* movss (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x11: /* movss %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0x6f: /* movdqu (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x7f: /* movdqu %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#if defined(__x86_64__)
                case 0x41: /* reg=%xmm0, X=%r8 */
                case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
                case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
                case 0x44: /* reg=%xmm8, X = %rax */
                case 0x45: /* reg=%xmm8, X = %r8 */
                case 0x46: /* reg=%xmm8, X = %rax,%r8,1 */
                case 0x47: /* reg=%xmm8, X = %r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x0f: /* SSE 2 */
                        {
                            switch (rip[3])
                            {
                                case 0x10: /* movss (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x11: /* movss reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0x6f: /* movdqu (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x7f: /* movdqu reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
                case 0x48:
                {
                    switch (rip[2])
                    {
                        case 0xa5: /* rep movsq %ds:(%rsi),%es:(%rdi) */
                            opType = OP_MOVS_RSI_RDI;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#endif
                case 0xa4: /* rep movsb %ds:(%rsi),%es:(%rdi) */
                case 0xa5: /* rep movsl %ds:(%rsi),%es:(%rdi) */
                    opType = OP_MOVS_RSI_RDI;
                    break;
                case 0xa6: /* repz cmpsb %es:(%rdi),%ds:(%rsi) */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
        case 0xf6: /* testb $,(%rax) or notb (%rax) */
        case 0xf7: /* testl $,(%rax) or notl (%rax) */
        {
            if (rip[1] < 0x10) /* test */
                opType = OP_LOAD;
            else /* not */
                opType = OP_STORE;
            break;
        }
        default:
            break;
    }
#endif
    return opType;
}
1502 : #endif
1503 :
1504 : /************************************************************************/
1505 : /* CPLVirtualMemManagerPinAddrInternal() */
1506 : /************************************************************************/
1507 :
1508 : static int
1509 100456 : CPLVirtualMemManagerPinAddrInternal(CPLVirtualMemMsgToWorkerThread *msg)
1510 : {
1511 100456 : char wait_ready = '\0';
1512 100456 : char response_buf[4] = {};
1513 :
1514 : // Wait for the helper thread to be ready to process another request.
1515 : while (true)
1516 : {
1517 : const int ret = static_cast<int>(
1518 100456 : read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1));
1519 100456 : if (ret < 0 && errno == EINTR)
1520 : {
1521 : // NOP
1522 : }
1523 : else
1524 : {
1525 100456 : IGNORE_OR_ASSERT_IN_DEBUG(ret == 1);
1526 100456 : break;
1527 : }
1528 0 : }
1529 :
1530 : // Pass the address that caused the fault to the helper thread.
1531 : const ssize_t nRetWrite =
1532 100456 : write(pVirtualMemManager->pipefd_to_thread[1], msg, sizeof(*msg));
1533 100456 : IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(*msg));
1534 :
1535 : // Wait that the helper thread has fixed the fault.
1536 : while (true)
1537 : {
1538 : const int ret = static_cast<int>(
1539 100456 : read(pVirtualMemManager->pipefd_from_thread[0], response_buf, 4));
1540 100456 : if (ret < 0 && errno == EINTR)
1541 : {
1542 : // NOP
1543 : }
1544 : else
1545 : {
1546 100456 : IGNORE_OR_ASSERT_IN_DEBUG(ret == 4);
1547 100456 : break;
1548 : }
1549 0 : }
1550 :
1551 : // In case the helper thread did not recognize the address as being
1552 : // one that it should take care of, just rely on the previous SIGSEGV
1553 : // handler (with might abort the process).
1554 100456 : return (memcmp(response_buf, MAPPING_FOUND, 4) == 0);
1555 : }
1556 :
1557 : /************************************************************************/
1558 : /* CPLVirtualMemPin() */
1559 : /************************************************************************/
1560 :
1561 0 : void CPLVirtualMemPin(CPLVirtualMem *ctxt, void *pAddr, size_t nSize,
1562 : int bWriteOp)
1563 : {
1564 0 : if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
1565 0 : return;
1566 :
1567 : CPLVirtualMemMsgToWorkerThread msg;
1568 :
1569 0 : memset(&msg, 0, sizeof(msg));
1570 0 : msg.hRequesterThread = pthread_self();
1571 0 : msg.opType = (bWriteOp) ? OP_STORE : OP_LOAD;
1572 :
1573 0 : char *pBase = reinterpret_cast<char *>(ALIGN_DOWN(pAddr, ctxt->nPageSize));
1574 0 : const size_t n = (reinterpret_cast<char *>(pAddr) - pBase + nSize +
1575 0 : ctxt->nPageSize - 1) /
1576 0 : ctxt->nPageSize;
1577 0 : for (size_t i = 0; i < n; i++)
1578 : {
1579 0 : msg.pFaultAddr = reinterpret_cast<char *>(pBase) + i * ctxt->nPageSize;
1580 0 : CPLVirtualMemManagerPinAddrInternal(&msg);
1581 : }
1582 : }
1583 :
1584 : /************************************************************************/
1585 : /* CPLVirtualMemManagerSIGSEGVHandler() */
1586 : /************************************************************************/
1587 :
1588 : #if defined(__x86_64__)
1589 : #define REG_IP REG_RIP
1590 : #define REG_SI REG_RSI
1591 : #define REG_DI REG_RDI
1592 : #elif defined(__i386__)
1593 : #define REG_IP REG_EIP
1594 : #define REG_SI REG_ESI
1595 : #define REG_DI REG_EDI
1596 : #endif
1597 :
1598 : // Must take care of only using "asynchronous-signal-safe" functions in a signal
1599 : // handler pthread_self(), read() and write() are such. See:
1600 : // https://www.securecoding.cert.org/confluence/display/seccode/SIG30-C.+Call+only+asynchronous-safe+functions+within+signal+handlers
1601 100456 : static void CPLVirtualMemManagerSIGSEGVHandler(int the_signal,
1602 : siginfo_t *the_info,
1603 : void *the_ctxt)
1604 : {
1605 : CPLVirtualMemMsgToWorkerThread msg;
1606 :
1607 100456 : memset(&msg, 0, sizeof(msg));
1608 100456 : msg.pFaultAddr = the_info->si_addr;
1609 100456 : msg.hRequesterThread = pthread_self();
1610 :
1611 : #if defined(__x86_64__) || defined(__i386__)
1612 100456 : ucontext_t *the_ucontext = static_cast<ucontext_t *>(the_ctxt);
1613 100456 : const GByte *rip = reinterpret_cast<const GByte *>(
1614 100456 : the_ucontext->uc_mcontext.gregs[REG_IP]);
1615 100456 : msg.opType = CPLVirtualMemGetOpType(rip);
1616 : #if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
1617 : fprintfstderr("at rip %p, bytes: %02x %02x %02x %02x\n", rip, rip[0],
1618 : rip[1], rip[2], rip[3]);
1619 : #endif
1620 100456 : if (msg.opType == OP_MOVS_RSI_RDI)
1621 : {
1622 0 : void *rsi =
1623 0 : reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_SI]);
1624 0 : void *rdi =
1625 0 : reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_DI]);
1626 :
1627 : #if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
1628 : fprintfstderr("fault=%p rsi=%p rsi=%p\n", msg.pFaultAddr, rsi, rdi);
1629 : #endif
1630 0 : if (msg.pFaultAddr == rsi)
1631 : {
1632 : #if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
1633 : fprintfstderr("load\n");
1634 : #endif
1635 0 : msg.opType = OP_LOAD;
1636 : }
1637 0 : else if (msg.pFaultAddr == rdi)
1638 : {
1639 : #if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
1640 : fprintfstderr("store\n");
1641 : #endif
1642 0 : msg.opType = OP_STORE;
1643 : }
1644 : }
1645 : #ifdef DEBUG_VIRTUALMEM
1646 : else if (msg.opType == OP_UNKNOWN)
1647 : {
1648 : static bool bHasWarned = false;
1649 : if (!bHasWarned)
1650 : {
1651 : bHasWarned = true;
1652 : fprintfstderr("at rip %p, unknown bytes: %02x %02x %02x %02x\n",
1653 : rip, rip[0], rip[1], rip[2], rip[3]);
1654 : }
1655 : }
1656 : #endif
1657 : #else
1658 : msg.opType = OP_UNKNOWN;
1659 : #endif
1660 :
1661 : #if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
1662 : fprintfstderr("entering handler for %X (addr=%p)\n", pthread_self(),
1663 : the_info->si_addr);
1664 : #endif
1665 :
1666 100456 : if (the_info->si_code != SEGV_ACCERR)
1667 : {
1668 0 : pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
1669 0 : return;
1670 : }
1671 :
1672 100456 : if (!CPLVirtualMemManagerPinAddrInternal(&msg))
1673 : {
1674 : // In case the helper thread did not recognize the address as being
1675 : // one that it should take care of, just rely on the previous SIGSEGV
1676 : // handler (with might abort the process).
1677 0 : pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
1678 : }
1679 :
1680 : #if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
1681 : fprintfstderr("leaving handler for %X (addr=%p)\n", pthread_self(),
1682 : the_info->si_addr);
1683 : #endif
1684 : }
1685 :
1686 : /************************************************************************/
1687 : /* CPLVirtualMemManagerThread() */
1688 : /************************************************************************/
1689 :
// Helper thread servicing page faults forwarded by the SIGSEGV handler.
// Protocol (all over pipes, so it also works when the requester is stuck
// inside a signal handler):
//   1. write 1 byte on pipefd_wait_thread  -> "ready for a request"
//   2. read a CPLVirtualMemMsgToWorkerThread from pipefd_to_thread
//   3. resolve the fault (fill + remap the page, or upgrade protection)
//   4. write MAPPING_FOUND / MAPPING_NOT_FOUND (4 bytes) on
//      pipefd_from_thread
// A message whose pFaultAddr is BYEBYE_ADDR terminates the thread.
static void CPLVirtualMemManagerThread(void * /* unused_param */)
{
    while (true)
    {
        char i_m_ready = 1;
        CPLVirtualMemVMA *ctxt = nullptr;
        bool bMappingFound = false;
        CPLVirtualMemMsgToWorkerThread msg;

        // Signal that we are ready to process a new request.
        ssize_t nRetWrite =
            write(pVirtualMemManager->pipefd_wait_thread[1], &i_m_ready, 1);
        IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 1);

        // Fetch the address to process.
        const ssize_t nRetRead =
            read(pVirtualMemManager->pipefd_to_thread[0], &msg, sizeof(msg));
        IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == sizeof(msg));

        // If CPLVirtualMemManagerTerminate() is called, it will use BYEBYE_ADDR
        // as a means to ask for our termination.
        if (msg.pFaultAddr == BYEBYE_ADDR)
            break;

        /* Lookup for a mapping that contains addr */
        CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
        for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
        {
            ctxt = pVirtualMemManager->pasVirtualMem[i];
            if (static_cast<char *>(msg.pFaultAddr) >=
                    static_cast<char *>(ctxt->sBase.pData) &&
                static_cast<char *>(msg.pFaultAddr) <
                    static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
            {
                bMappingFound = true;
                break;
            }
        }
        CPLReleaseMutex(hVirtualMemManagerMutex);

        if (bMappingFound)
        {
            // Identify the page (index within the mapping) the fault hit.
            char *const start_page_addr = static_cast<char *>(
                ALIGN_DOWN(msg.pFaultAddr, ctxt->sBase.nPageSize));
            const int iPage =
                static_cast<int>((static_cast<char *>(start_page_addr) -
                                  static_cast<char *>(ctxt->sBase.pData)) /
                                 ctxt->sBase.nPageSize);

            if (iPage == ctxt->iLastPage)
            {
                // In case 2 threads try to access the same page concurrently
                // it is possible that we are asked to map the page again
                // whereas it is already mapped. However, if that number of
                // successive retries is too high, this is certainly a sign
                // that something else happens, like trying to write-access a
                // read-only page. 100 is a bit of a magic number. Rouault
                // believes it must be at least the number of concurrent
                // threads. 100 seems to be really safe!
                ctxt->nRetry++;
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                fprintfstderr("retry on page %d : %d\n", iPage, ctxt->nRetry);
#endif
                if (ctxt->nRetry >= 100)
                {
                    CPLError(CE_Failure, CPLE_AppDefined,
                             "CPLVirtualMemManagerThread: trying to "
                             "write into read-only mapping");
                    nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                                      MAPPING_NOT_FOUND, 4);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
                    break;
                }
                else if (msg.opType != OP_LOAD &&
                         ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                         !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                {
                    // Store into a page currently mapped read-only:
                    // upgrade its protection and remember it is dirty.
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("switching page %d to write mode\n", iPage);
#endif
                    SET_BIT(ctxt->pabitRWMappedPages, iPage);
                    const int nRet =
                        mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                 PROT_READ | PROT_WRITE);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                }
            }
            else
            {
                ctxt->iLastPage = iPage;
                ctxt->nRetry = 0;

                if (TEST_BIT(ctxt->pabitMappedPages, iPage))
                {
                    // Page already realized: the only legitimate fault on it
                    // is a first write to a read-only page of a read-write
                    // mapping.
                    if (msg.opType != OP_LOAD &&
                        ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                        !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("switching page %d to write mode\n",
                                      iPage);
#endif
                        SET_BIT(ctxt->pabitRWMappedPages, iPage);
                        const int nRet =
                            mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                     PROT_READ | PROT_WRITE);
                        IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                    }
                    else
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("unexpected case for page %d\n", iPage);
#endif
                    }
                }
                else
                {
                    // Page never realized: obtain a scratch page, let the
                    // user callback fill it, then remap it in place.
                    void *const pPageToFill =
                        CPLVirtualMemGetPageToFill(ctxt, start_page_addr);

                    // The last page of the mapping may be truncated.
                    size_t nToFill = ctxt->sBase.nPageSize;
                    if (start_page_addr + nToFill >=
                        static_cast<char *>(ctxt->sBase.pData) +
                            ctxt->sBase.nSize)
                    {
                        nToFill = static_cast<char *>(ctxt->sBase.pData) +
                                  ctxt->sBase.nSize - start_page_addr;
                    }

                    ctxt->pfnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                       start_page_addr - static_cast<char *>(
                                                             ctxt->sBase.pData),
                                       pPageToFill, nToFill,
                                       ctxt->sBase.pCbkUserData);

                    // Now remap this page to its target address and
                    // register it in the LRU.
                    CPLVirtualMemAddPage(ctxt, start_page_addr, pPageToFill,
                                         msg.opType, msg.hRequesterThread);
                }
            }

            // Warn the segfault handler that we have finished our job.
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
        else
        {
            // Warn the segfault handler that we have finished our job
            // but that the fault didn't occur in a memory range that
            // is under our responsibility.
            CPLError(CE_Failure, CPLE_AppDefined,
                     "CPLVirtualMemManagerThread: no mapping found");
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_NOT_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
    }
}
1850 :
1851 : /************************************************************************/
1852 : /* CPLVirtualMemManagerInit() */
1853 : /************************************************************************/
1854 :
1855 17 : static bool CPLVirtualMemManagerInit()
1856 : {
1857 34 : CPLMutexHolderD(&hVirtualMemManagerMutex);
1858 17 : if (pVirtualMemManager != nullptr)
1859 15 : return true;
1860 :
1861 : struct sigaction act;
1862 2 : pVirtualMemManager = static_cast<CPLVirtualMemManager *>(
1863 2 : VSI_MALLOC_VERBOSE(sizeof(CPLVirtualMemManager)));
1864 2 : if (pVirtualMemManager == nullptr)
1865 0 : return false;
1866 2 : pVirtualMemManager->pasVirtualMem = nullptr;
1867 2 : pVirtualMemManager->nVirtualMemCount = 0;
1868 2 : int nRet = pipe(pVirtualMemManager->pipefd_to_thread);
1869 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1870 2 : nRet = pipe(pVirtualMemManager->pipefd_from_thread);
1871 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1872 2 : nRet = pipe(pVirtualMemManager->pipefd_wait_thread);
1873 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1874 :
1875 : // Install our custom SIGSEGV handler.
1876 2 : act.sa_sigaction = CPLVirtualMemManagerSIGSEGVHandler;
1877 2 : sigemptyset(&act.sa_mask);
1878 2 : act.sa_flags = SA_SIGINFO;
1879 2 : nRet = sigaction(SIGSEGV, &act, &pVirtualMemManager->oldact);
1880 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1881 :
1882 : // Starts the helper thread.
1883 4 : pVirtualMemManager->hHelperThread =
1884 2 : CPLCreateJoinableThread(CPLVirtualMemManagerThread, nullptr);
1885 2 : if (pVirtualMemManager->hHelperThread == nullptr)
1886 : {
1887 0 : VSIFree(pVirtualMemManager);
1888 0 : pVirtualMemManager = nullptr;
1889 0 : return false;
1890 : }
1891 2 : return true;
1892 : }
1893 :
1894 : /************************************************************************/
1895 : /* CPLVirtualMemManagerTerminate() */
1896 : /************************************************************************/
1897 :
// Tear down the process-wide virtual memory manager created by
// CPLVirtualMemManagerInit(): stop the helper thread (via the BYEBYE_ADDR
// sentinel message), free all remaining mappings, close the pipes,
// restore the previous SIGSEGV handler and release the manager.
// No-op if the manager was never initialized.
void CPLVirtualMemManagerTerminate(void)
{
    if (pVirtualMemManager == nullptr)
        return;

    // BYEBYE_ADDR is the agreed-upon sentinel asking the helper thread to
    // exit its service loop.
    CPLVirtualMemMsgToWorkerThread msg;
    msg.pFaultAddr = BYEBYE_ADDR;
    msg.opType = OP_UNKNOWN;
    memset(&msg.hRequesterThread, 0, sizeof(msg.hRequesterThread));

    // Wait for the helper thread to be ready.
    char wait_ready;
    const ssize_t nRetRead =
        read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1);
    IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == 1);

    // Ask it to terminate.
    const ssize_t nRetWrite =
        write(pVirtualMemManager->pipefd_to_thread[1], &msg, sizeof(msg));
    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(msg));

    // Wait for its termination.
    CPLJoinThread(pVirtualMemManager->hHelperThread);

    // Cleanup everything: free mappings from the end of the array, since
    // CPLVirtualMemFree() removes the entry from pasVirtualMem.
    while (pVirtualMemManager->nVirtualMemCount > 0)
        CPLVirtualMemFree(reinterpret_cast<CPLVirtualMem *>(
            pVirtualMemManager
                ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount - 1]));
    CPLFree(pVirtualMemManager->pasVirtualMem);

    close(pVirtualMemManager->pipefd_to_thread[0]);
    close(pVirtualMemManager->pipefd_to_thread[1]);
    close(pVirtualMemManager->pipefd_from_thread[0]);
    close(pVirtualMemManager->pipefd_from_thread[1]);
    close(pVirtualMemManager->pipefd_wait_thread[0]);
    close(pVirtualMemManager->pipefd_wait_thread[1]);

    // Restore previous handler.
    sigaction(SIGSEGV, &pVirtualMemManager->oldact, nullptr);

    CPLFree(pVirtualMemManager);
    pVirtualMemManager = nullptr;

    CPLDestroyMutex(hVirtualMemManagerMutex);
    hVirtualMemManagerMutex = nullptr;
}
1945 :
1946 : #else // HAVE_VIRTUAL_MEM_VMA
1947 :
1948 : CPLVirtualMem *CPLVirtualMemNew(
1949 : size_t /* nSize */, size_t /* nCacheSize */, size_t /* nPageSizeHint */,
1950 : int /* bSingleThreadUsage */, CPLVirtualMemAccessMode /* eAccessMode */,
1951 : CPLVirtualMemCachePageCbk /* pfnCachePage */,
1952 : CPLVirtualMemUnCachePageCbk /* pfnUnCachePage */,
1953 : CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
1954 : {
1955 : CPLError(CE_Failure, CPLE_NotSupported,
1956 : "CPLVirtualMemNew() unsupported on "
1957 : "this operating system / configuration");
1958 : return nullptr;
1959 : }
1960 :
/** No-op stub: without HAVE_VIRTUAL_MEM_VMA there is no per-thread
 * registration machinery for virtual memory objects, so declaring a
 * thread is harmless and does nothing. */
void CPLVirtualMemDeclareThread(CPLVirtualMem * /* ctxt */)
{
}
1964 :
/** No-op stub: counterpart of CPLVirtualMemDeclareThread() when
 * HAVE_VIRTUAL_MEM_VMA is not available; nothing was registered,
 * so nothing needs to be undone. */
void CPLVirtualMemUnDeclareThread(CPLVirtualMem * /* ctxt */)
{
}
1968 :
/** No-op stub: without HAVE_VIRTUAL_MEM_VMA there are no lazily-faulted
 * pages to pre-fault, so pinning an address range does nothing. */
void CPLVirtualMemPin(CPLVirtualMem * /* ctxt */, void * /* pAddr */,
                      size_t /* nSize */, int /* bWriteOp */)
{
}
1973 :
/** No-op stub: without HAVE_VIRTUAL_MEM_VMA no helper thread, pipes or
 * SIGSEGV handler were ever installed, so there is nothing to tear down. */
void CPLVirtualMemManagerTerminate(void)
{
}
1977 :
1978 : #endif // HAVE_VIRTUAL_MEM_VMA
1979 :
1980 : #ifdef HAVE_MMAP
1981 :
1982 : /************************************************************************/
1983 : /* CPLVirtualMemFreeFileMemoryMapped() */
1984 : /************************************************************************/
1985 :
1986 28 : static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMem *ctxt)
1987 : {
1988 28 : const size_t nMappingSize = ctxt->nSize +
1989 28 : static_cast<GByte *>(ctxt->pData) -
1990 28 : static_cast<GByte *>(ctxt->pDataToFree);
1991 28 : const int nRet = munmap(ctxt->pDataToFree, nMappingSize);
1992 28 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1993 28 : }
1994 :
1995 : /************************************************************************/
1996 : /* CPLVirtualMemFileMapNew() */
1997 : /************************************************************************/
1998 :
/** Create a virtual memory object directly backed by mmap()ing a range
 * of a file.
 *
 * @param fp           VSI large file handle; must expose a native POSIX
 *                     file descriptor (virtual filesystems are rejected).
 * @param nOffset      offset in bytes of the start of the mapped range.
 * @param nLength      length in bytes of the mapped range.
 * @param eAccessMode  VIRTUALMEM_READWRITE maps PROT_READ|PROT_WRITE and
 *                     allows growing the file; otherwise PROT_READ only.
 * @param pfnFreeUserData optional callback invoked by CPLVirtualMemFree().
 * @param pCbkUserData    opaque argument for pfnFreeUserData.
 * @return a new CPLVirtualMem object, or nullptr on failure (with a
 *         CPLError emitted).
 */
CPLVirtualMem *CPLVirtualMemFileMapNew(
    VSILFILE *fp, vsi_l_offset nOffset, vsi_l_offset nLength,
    CPLVirtualMemAccessMode eAccessMode,
    CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
{
#if SIZEOF_VOIDP == 4
    // On 32-bit builds, reject ranges that cannot be represented by
    // size_t / off_t, since mmap() takes those narrower types.
    if (nLength != static_cast<size_t>(nLength))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nLength = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nLength);
        return nullptr;
    }
    if (nOffset + CPLGetPageSize() !=
        static_cast<vsi_l_offset>(
            static_cast<off_t>(nOffset + CPLGetPageSize())))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nOffset = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nOffset);
        return nullptr;
    }
#endif

    // mmap() needs a real kernel file descriptor. A null native descriptor
    // (fd == 0) means fp is a purely virtual file that cannot be mapped.
    int fd = static_cast<int>(
        reinterpret_cast<GUIntptr_t>(VSIFGetNativeFileDescriptorL(fp)));
    if (fd == 0)
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "Cannot operate on a virtual file");
        return nullptr;
    }

    // mmap() requires a page-aligned file offset: round nOffset down to a
    // page boundary and remember the slack (nAlignment) so the user-visible
    // pointer can be shifted back up to the requested offset.
    const off_t nAlignedOffset =
        static_cast<off_t>((nOffset / CPLGetPageSize()) * CPLGetPageSize());
    size_t nAlignment = static_cast<size_t>(nOffset - nAlignedOffset);
    size_t nMappingSize = static_cast<size_t>(nLength + nAlignment);

    // Need to ensure that the requested extent fits into the file size
    // otherwise SIGBUS errors will occur when using the mapping.
    vsi_l_offset nCurPos = VSIFTellL(fp);
    if (VSIFSeekL(fp, 0, SEEK_END) != 0)
        return nullptr;
    vsi_l_offset nFileSize = VSIFTellL(fp);
    if (nFileSize < nOffset + nLength)
    {
        if (eAccessMode != VIRTUALMEM_READWRITE)
        {
            // Read-only mapping beyond EOF cannot be honoured.
            CPLError(CE_Failure, CPLE_AppDefined,
                     "Trying to map an extent outside of the file");
            CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
            return nullptr;
        }
        else
        {
            // Read-write mapping: grow the file up to the end of the
            // requested range by writing a single byte at its last offset.
            char ch = 0;
            if (VSIFSeekL(fp, nOffset + nLength - 1, SEEK_SET) != 0 ||
                VSIFWriteL(&ch, 1, 1, fp) != 1)
            {
                CPLError(CE_Failure, CPLE_AppDefined,
                         "Cannot extend file to mapping size");
                CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
                return nullptr;
            }
        }
    }
    // Restore the file position the caller had before we probed the size.
    if (VSIFSeekL(fp, nCurPos, SEEK_SET) != 0)
        return nullptr;

    CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
    if (ctxt == nullptr)
        return nullptr;

    void *addr =
        mmap(nullptr, nMappingSize,
             eAccessMode == VIRTUALMEM_READWRITE ? PROT_READ | PROT_WRITE
                                                 : PROT_READ,
             MAP_SHARED, fd, nAlignedOffset);
    if (addr == MAP_FAILED)
    {
        int myerrno = errno;
        CPLError(CE_Failure, CPLE_AppDefined, "mmap() failed : %s",
                 strerror(myerrno));
        VSIFree(ctxt);
        // cppcheck thinks we are leaking addr.
        // cppcheck-suppress memleak
        return nullptr;
    }

    // pData points at the byte the caller asked for; pDataToFree keeps the
    // true (page-aligned) start of the mapping for munmap() later.
    ctxt->eType = VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
    ctxt->nRefCount = 1;
    ctxt->eAccessMode = eAccessMode;
    ctxt->pData = static_cast<GByte *>(addr) + nAlignment;
    ctxt->pDataToFree = addr;
    ctxt->nSize = static_cast<size_t>(nLength);
    ctxt->nPageSize = CPLGetPageSize();
    ctxt->bSingleThreadUsage = false;
    ctxt->pfnFreeUserData = pfnFreeUserData;
    ctxt->pCbkUserData = pCbkUserData;

    return ctxt;
}
2104 :
2105 : #else // HAVE_MMAP
2106 :
2107 : CPLVirtualMem *CPLVirtualMemFileMapNew(
2108 : VSILFILE * /* fp */, vsi_l_offset /* nOffset */, vsi_l_offset /* nLength */,
2109 : CPLVirtualMemAccessMode /* eAccessMode */,
2110 : CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
2111 : {
2112 : CPLError(CE_Failure, CPLE_NotSupported,
2113 : "CPLVirtualMemFileMapNew() unsupported on this "
2114 : "operating system / configuration");
2115 : return nullptr;
2116 : }
2117 :
2118 : #endif // HAVE_MMAP
2119 :
2120 : /************************************************************************/
2121 : /* CPLGetPageSize() */
2122 : /************************************************************************/
2123 :
/** Return the size in bytes of a system memory page, as reported by
 * sysconf(_SC_PAGESIZE), or 0 when no virtual memory support is
 * compiled in. */
size_t CPLGetPageSize(void)
{
#if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
    const long nPageSize = sysconf(_SC_PAGESIZE);
    return static_cast<size_t>(nPageSize);
#else
    return 0;
#endif
}
2132 :
2133 : /************************************************************************/
2134 : /* CPLIsVirtualMemFileMapAvailable() */
2135 : /************************************************************************/
2136 :
/** Return TRUE if CPLVirtualMemFileMapNew() is implemented in this build,
 * i.e. when mmap() support was detected at configure time; FALSE
 * otherwise (in which case CPLVirtualMemFileMapNew() always fails). */
int CPLIsVirtualMemFileMapAvailable(void)
{
#ifdef HAVE_MMAP
    return TRUE;
#else
    return FALSE;
#endif
}
2145 :
2146 : /************************************************************************/
2147 : /* CPLVirtualMemFree() */
2148 : /************************************************************************/
2149 :
/** Drop one reference to a virtual memory object and destroy it when the
 * count reaches zero.
 *
 * Safe to call with nullptr. For a derived object (pVMemBase set by
 * CPLVirtualMemDerivedNew()), the object owns no memory itself: freeing
 * it recursively drops a reference on the base object instead of
 * unmapping anything. Otherwise the underlying mapping is released
 * according to the object's type. In all destruction paths the
 * pfnFreeUserData callback, when provided, is invoked with pCbkUserData
 * before the object itself is freed.
 */
void CPLVirtualMemFree(CPLVirtualMem *ctxt)
{
    // Short-circuit keeps the decrement from running on a null pointer.
    if (ctxt == nullptr || --(ctxt->nRefCount) > 0)
        return;

    // Derived (windowed) object: release the reference it holds on its
    // base mapping, then free only the wrapper.
    if (ctxt->pVMemBase != nullptr)
    {
        CPLVirtualMemFree(ctxt->pVMemBase);
        if (ctxt->pfnFreeUserData != nullptr)
            ctxt->pfnFreeUserData(ctxt->pCbkUserData);
        CPLFree(ctxt);
        return;
    }

#ifdef HAVE_MMAP
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
#endif
#ifdef HAVE_VIRTUAL_MEM_VMA
    // The cast selects the CPLVirtualMemVMA overload of
    // CPLVirtualMemFreeFileMemoryMapped() (defined earlier in this file,
    // outside this excerpt), which tears down the VMA-managed region.
    if (ctxt->eType == VIRTUAL_MEM_TYPE_VMA)
        CPLVirtualMemFreeFileMemoryMapped(
            reinterpret_cast<CPLVirtualMemVMA *>(ctxt));
#endif

    if (ctxt->pfnFreeUserData != nullptr)
        ctxt->pfnFreeUserData(ctxt->pCbkUserData);
    CPLFree(ctxt);
}
2178 :
2179 : /************************************************************************/
2180 : /* CPLVirtualMemGetAddr() */
2181 : /************************************************************************/
2182 :
2183 302 : void *CPLVirtualMemGetAddr(CPLVirtualMem *ctxt)
2184 : {
2185 302 : return ctxt->pData;
2186 : }
2187 :
2188 : /************************************************************************/
2189 : /* CPLVirtualMemIsFileMapping() */
2190 : /************************************************************************/
2191 :
2192 4 : int CPLVirtualMemIsFileMapping(CPLVirtualMem *ctxt)
2193 : {
2194 4 : return ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
2195 : }
2196 :
2197 : /************************************************************************/
2198 : /* CPLVirtualMemGetAccessMode() */
2199 : /************************************************************************/
2200 :
2201 0 : CPLVirtualMemAccessMode CPLVirtualMemGetAccessMode(CPLVirtualMem *ctxt)
2202 : {
2203 0 : return ctxt->eAccessMode;
2204 : }
2205 :
2206 : /************************************************************************/
2207 : /* CPLVirtualMemGetPageSize() */
2208 : /************************************************************************/
2209 :
2210 5 : size_t CPLVirtualMemGetPageSize(CPLVirtualMem *ctxt)
2211 : {
2212 5 : return ctxt->nPageSize;
2213 : }
2214 :
2215 : /************************************************************************/
2216 : /* CPLVirtualMemGetSize() */
2217 : /************************************************************************/
2218 :
2219 271 : size_t CPLVirtualMemGetSize(CPLVirtualMem *ctxt)
2220 : {
2221 271 : return ctxt->nSize;
2222 : }
2223 :
2224 : /************************************************************************/
2225 : /* CPLVirtualMemIsAccessThreadSafe() */
2226 : /************************************************************************/
2227 :
2228 1 : int CPLVirtualMemIsAccessThreadSafe(CPLVirtualMem *ctxt)
2229 : {
2230 1 : return !ctxt->bSingleThreadUsage;
2231 : }
2232 :
2233 : /************************************************************************/
2234 : /* CPLVirtualMemDerivedNew() */
2235 : /************************************************************************/
2236 :
2237 8 : CPLVirtualMem *CPLVirtualMemDerivedNew(
2238 : CPLVirtualMem *pVMemBase, vsi_l_offset nOffset, vsi_l_offset nSize,
2239 : CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
2240 : {
2241 8 : if (nOffset + nSize > pVMemBase->nSize)
2242 0 : return nullptr;
2243 :
2244 : CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
2245 8 : VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
2246 8 : if (ctxt == nullptr)
2247 0 : return nullptr;
2248 :
2249 8 : ctxt->eType = pVMemBase->eType;
2250 8 : ctxt->nRefCount = 1;
2251 8 : ctxt->pVMemBase = pVMemBase;
2252 8 : pVMemBase->nRefCount++;
2253 8 : ctxt->eAccessMode = pVMemBase->eAccessMode;
2254 8 : ctxt->pData = static_cast<GByte *>(pVMemBase->pData) + nOffset;
2255 8 : ctxt->pDataToFree = nullptr;
2256 8 : ctxt->nSize = static_cast<size_t>(nSize);
2257 8 : ctxt->nPageSize = pVMemBase->nPageSize;
2258 8 : ctxt->bSingleThreadUsage = CPL_TO_BOOL(pVMemBase->bSingleThreadUsage);
2259 8 : ctxt->pfnFreeUserData = pfnFreeUserData;
2260 8 : ctxt->pCbkUserData = pCbkUserData;
2261 :
2262 8 : return ctxt;
2263 : }
|