Line data Source code
1 : /**********************************************************************
2 : *
3 : * Name: cpl_virtualmem.cpp
4 : * Project: CPL - Common Portability Library
5 : * Purpose: Virtual memory
6 : * Author: Even Rouault, <even dot rouault at spatialys.com>
7 : *
8 : **********************************************************************
9 : * Copyright (c) 2014, Even Rouault <even dot rouault at spatialys.com>
10 : *
11 : * SPDX-License-Identifier: MIT
12 : ****************************************************************************/
13 :
14 : #ifndef _GNU_SOURCE
15 : #define _GNU_SOURCE
16 : #endif
17 :
18 : // to have off_t on 64bit possibly
19 : #ifndef _FILE_OFFSET_BITS
20 : #define _FILE_OFFSET_BITS 64
21 : #endif
22 :
23 : #include "cpl_virtualmem.h"
24 :
25 : #include <algorithm>
26 : #include <cassert>
27 :
28 : #include "cpl_atomic_ops.h"
29 : #include "cpl_config.h"
30 : #include "cpl_conv.h"
31 : #include "cpl_error.h"
32 : #include "cpl_multiproc.h"
33 :
34 : #ifdef NDEBUG
// NDEBUG (assertions compiled out): just ignore the result.
36 : #define IGNORE_OR_ASSERT_IN_DEBUG(expr) CPL_IGNORE_RET_VAL((expr))
37 : #else
38 : // Debug: Assert.
39 : #define IGNORE_OR_ASSERT_IN_DEBUG(expr) assert((expr))
40 : #endif
41 :
42 : #if defined(__linux) && defined(CPL_MULTIPROC_PTHREAD)
43 : #ifndef HAVE_5ARGS_MREMAP
44 : // FIXME? gcore/virtualmem.py tests fail/crash when HAVE_5ARGS_MREMAP
45 : // is not defined.
46 : #warning "HAVE_5ARGS_MREMAP not found. Disabling HAVE_VIRTUAL_MEM_VMA"
47 : #else
48 : #define HAVE_VIRTUAL_MEM_VMA
49 : #endif
50 : #endif
51 :
52 : #if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
53 : #include <unistd.h> // read, write, close, pipe, sysconf
54 : #include <sys/mman.h> // mmap, munmap, mremap
55 : #endif
56 :
// Discriminates the concrete implementations behind a CPLVirtualMem.
typedef enum
{
    // Backed by a memory-mapped file (implementation not in this chunk;
    // see the early returns in CPLVirtualMemDeclareThread/UnDeclareThread).
    VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED,
    // SIGSEGV-driven virtual memory area filled lazily by user callbacks
    // (CPLVirtualMemVMA below).
    VIRTUAL_MEM_TYPE_VMA
} CPLVirtualMemType;
62 :
// Base "class" common to all virtual memory implementations.
// CPLVirtualMemVMA embeds it as its first member so that a
// CPLVirtualMemVMA* can be reinterpret_cast to CPLVirtualMem*.
struct CPLVirtualMem
{
    CPLVirtualMemType eType;  // Which concrete implementation this is.

    // Base mapping this one is derived from -- presumably nullptr for a
    // root mapping; confirm with the derived-view code (not in this chunk).
    struct CPLVirtualMem *pVMemBase;
    int nRefCount;  // Reference count; starts at 1 in CPLVirtualMemNew().

    CPLVirtualMemAccessMode eAccessMode;  // VIRTUALMEM_READONLY / _READWRITE.

    size_t nPageSize;
    // Aligned on nPageSize.
    void *pData;
    // Returned by mmap(), potentially lower than pData.
    void *pDataToFree;
    // Requested size (unrounded).
    size_t nSize;

    // If true, thread bookkeeping/synchronization is skipped.
    bool bSingleThreadUsage;

    void *pCbkUserData;  // Opaque pointer handed back to the callbacks.
    CPLVirtualMemFreeUserData pfnFreeUserData;  // Invoked on destruction.
};
85 :
86 : #ifdef HAVE_VIRTUAL_MEM_VMA
87 :
88 : #include <sys/select.h> // select
89 : #include <sys/stat.h> // open()
90 : #include <sys/types.h> // open()
91 : #include <errno.h>
92 : #include <fcntl.h> // open()
93 : #include <signal.h> // sigaction
94 : #include <stdio.h>
95 : #include <stdlib.h>
96 : #include <string.h>
97 : #include <pthread.h>
98 :
99 : #ifndef HAVE_5ARGS_MREMAP
100 : #include "cpl_atomic_ops.h"
101 : #endif
102 :
103 : /* Linux specific (i.e. non POSIX compliant) features used:
104 : - returning from a SIGSEGV handler is clearly a POSIX violation, but in
105 : practice most POSIX systems should be happy.
106 : - mremap() with 5 args is Linux specific. It is used when the user
107 : callback is invited to fill a page, we currently mmap() a
writable page, let the callback fill it, and afterwards mremap() that
109 : temporary page onto the location where the fault occurred.
110 : If we have no mremap(), the workaround is to pause other threads that
111 : consume the current view while we are updating the faulted page, otherwise
112 : a non-paused thread could access a page that is in the middle of being
113 : filled... The way we pause those threads is quite original : we send them
114 : a SIGUSR1 and wait that they are stuck in the temporary SIGUSR1 handler...
115 : - MAP_ANONYMOUS isn't documented in POSIX, but very commonly found
116 : (sometimes called MAP_ANON)
117 : - dealing with the limitation of number of memory mapping regions,
118 : and the 65536 limit.
119 : - other things I've not identified
120 : */
121 :
// Round a pointer down to the start of the page containing it.
#define ALIGN_DOWN(p, pagesize) \
    reinterpret_cast<void *>((reinterpret_cast<GUIntptr_t>(p)) / (pagesize) * \
                             (pagesize))
// Round a pointer up to the next page boundary (no-op if already aligned).
#define ALIGN_UP(p, pagesize) \
    reinterpret_cast<void *>( \
        cpl::div_round_up(reinterpret_cast<GUIntptr_t>(p), (pagesize)) * \
        (pagesize))

// Default page size of a VMA view (256*256 = 64 KB) and the largest
// page size hint accepted in CPLVirtualMemNew().
#define DEFAULT_PAGE_SIZE (256 * 256)
#define MAXIMUM_PAGE_SIZE (32 * 1024 * 1024)

// Linux Kernel limit.
#define MAXIMUM_COUNT_OF_MAPPINGS 65536

// Sentinel fault address -- presumably used to tell the helper thread to
// exit; confirm with the helper-thread code (not in this chunk).
#define BYEBYE_ADDR (reinterpret_cast<void *>(~static_cast<size_t>(0)))

// Reply tokens -- presumably exchanged over the helper-thread pipes;
// confirm with the helper-thread code (not in this chunk).
#define MAPPING_FOUND "yeah"
#define MAPPING_NOT_FOUND "doh!"

// Single-bit accessors for the per-page bitmaps (1 bit per page).
#define SET_BIT(ar, bitnumber) ar[(bitnumber) / 8] |= 1 << ((bitnumber) % 8)
#define UNSET_BIT(ar, bitnumber) \
    ar[(bitnumber) / 8] &= ~(1 << ((bitnumber) % 8))
#define TEST_BIT(ar, bitnumber) (ar[(bitnumber) / 8] & (1 << ((bitnumber) % 8)))
145 :
// Classification of the machine instruction that triggered a fault.
// Used as an optimization hint: on a store, the faulted page can be
// mapped read-write immediately (see CPLVirtualMemAddPage).
typedef enum
{
    OP_LOAD,    // Faulting instruction reads from the address.
    OP_STORE,   // Faulting instruction writes to the address.
    OP_MOVS_RSI_RDI,  // Presumably an x86 "movs (%rsi),(%rdi)" -- both a
                      // load and a store; confirm with the decoder code.
    OP_UNKNOWN  // Could not be classified.
} OpType;
153 :
// Virtual memory area whose pages are filled lazily by user callbacks,
// driven from a SIGSEGV handler. Pages are cached with a FIFO/LRU-like
// policy bounded by nCacheMaxSizeInPages.
typedef struct
{
    CPLVirtualMem sBase;  // Must be first: enables cast to CPLVirtualMem*.

    GByte *pabitMappedPages;    // 1 bit per page: page currently mapped.
    GByte *pabitRWMappedPages;  // 1 bit per page: mapped read-write
                                // (needs pfnUnCachePage on eviction/free).

    int nCacheMaxSizeInPages;  // Maximum size of page array.
    int *panLRUPageIndices;    // Array with indices of cached pages.
    int iLRUStart;             // Index in array where to
                               // write next page index.
    int nLRUSize;              // Current size of the array.

    int iLastPage;  // Last page accessed.
    int nRetry;     // Number of consecutive
                    // retries to that last page.

    CPLVirtualMemCachePageCbk pfnCachePage;      // Called when a page is
                                                 // mapped.
    CPLVirtualMemUnCachePageCbk pfnUnCachePage;  // Called when a (writable)
                                                 // page is unmapped.

#ifndef HAVE_5ARGS_MREMAP
    CPLMutex *hMutexThreadArray;  // Protects nThreads/pahThreads.
    int nThreads;                 // Number of threads using this view.
    pthread_t *pahThreads;        // Threads registered via
                                  // CPLVirtualMemDeclareThread().
#endif
} CPLVirtualMemVMA;
182 :
// Process-wide state for the SIGSEGV-based paging machinery. The pipes
// presumably connect the SIGSEGV handler with the helper thread that
// services faults -- confirm with CPLVirtualMemManagerInit (not in this
// chunk).
typedef struct
{
    // hVirtualMemManagerMutex protects the 2 following variables.
    CPLVirtualMemVMA **pasVirtualMem;  // Registered VMA views.
    int nVirtualMemCount;              // Number of entries in pasVirtualMem.

    int pipefd_to_thread[2];    // Requests towards the helper thread.
    int pipefd_from_thread[2];  // Replies from the helper thread.
    int pipefd_wait_thread[2];
    CPLJoinableThread *hHelperThread;  // Thread servicing page faults.

    // Using sigaction without testing HAVE_SIGACTION since we are in a Linux
    // specific code path
    struct sigaction oldact;  // Previous SIGSEGV disposition.
} CPLVirtualMemManager;
198 :
// Description of a fault, sent to the worker/helper thread for servicing.
typedef struct
{
    void *pFaultAddr;            // Address whose access faulted.
    OpType opType;               // Load/store classification of the fault.
    pthread_t hRequesterThread;  // Thread in which the fault occurred.
} CPLVirtualMemMsgToWorkerThread;
205 :
206 : // TODO: Singletons.
207 : static CPLVirtualMemManager *pVirtualMemManager = nullptr;
208 : static CPLMutex *hVirtualMemManagerMutex = nullptr;
209 :
210 : static bool CPLVirtualMemManagerInit();
211 :
212 : #ifdef DEBUG_VIRTUALMEM
213 :
214 : /************************************************************************/
215 : /* fprintfstderr() */
216 : /************************************************************************/
217 :
218 : // This function may be called from signal handlers where most functions
219 : // from the C library are unsafe to be called. fprintf() is clearly one
220 : // of those functions (see
221 : // http://stackoverflow.com/questions/4554129/linux-glibc-can-i-use-fprintf-in-signal-handler)
222 : // vsnprintf() is *probably* safer with respect to that (but there is no
223 : // guarantee though).
224 : // write() is async-signal-safe.
// Format a message and write it to stderr using only write(), which is
// async-signal-safe (unlike fprintf). Messages longer than the local
// buffer are truncated. Partial writes are resumed; EINTR is retried.
static void fprintfstderr(const char *fmt, ...)
{
    char buffer[80] = {};
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buffer, sizeof(buffer), fmt, ap);
    va_end(ap);
    int offset = 0;
    while (true)
    {
        const size_t nSizeToWrite = strlen(buffer + offset);
        int ret = static_cast<int>(write(2, buffer + offset, nSizeToWrite));
        if (ret < 0)
        {
            // Previously a non-EINTR error fell through to
            // "offset += ret", moving offset backwards and looping
            // forever. On a hard error there is nothing useful to do:
            // give up.
            if (errno != EINTR)
                break;
        }
        else
        {
            if (ret == static_cast<int>(nSizeToWrite))
                break;
            offset += ret;  // Partial write: continue from where we stopped.
        }
    }
}
248 :
249 : #endif
250 :
251 : /************************************************************************/
252 : /* CPLVirtualMemManagerRegisterVirtualMem() */
253 : /************************************************************************/
254 :
255 17 : static bool CPLVirtualMemManagerRegisterVirtualMem(CPLVirtualMemVMA *ctxt)
256 : {
257 17 : if (!CPLVirtualMemManagerInit())
258 0 : return false;
259 :
260 17 : bool bSuccess = true;
261 17 : IGNORE_OR_ASSERT_IN_DEBUG(ctxt);
262 17 : CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
263 : CPLVirtualMemVMA **pasVirtualMemNew = static_cast<CPLVirtualMemVMA **>(
264 17 : VSI_REALLOC_VERBOSE(pVirtualMemManager->pasVirtualMem,
265 : sizeof(CPLVirtualMemVMA *) *
266 : (pVirtualMemManager->nVirtualMemCount + 1)));
267 17 : if (pasVirtualMemNew == nullptr)
268 : {
269 0 : bSuccess = false;
270 : }
271 : else
272 : {
273 17 : pVirtualMemManager->pasVirtualMem = pasVirtualMemNew;
274 : pVirtualMemManager
275 17 : ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount] = ctxt;
276 17 : pVirtualMemManager->nVirtualMemCount++;
277 : }
278 17 : CPLReleaseMutex(hVirtualMemManagerMutex);
279 17 : return bSuccess;
280 : }
281 :
282 : /************************************************************************/
283 : /* CPLVirtualMemManagerUnregisterVirtualMem() */
284 : /************************************************************************/
285 :
286 17 : static void CPLVirtualMemManagerUnregisterVirtualMem(CPLVirtualMemVMA *ctxt)
287 : {
288 17 : CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
289 23 : for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
290 : {
291 23 : if (pVirtualMemManager->pasVirtualMem[i] == ctxt)
292 : {
293 17 : if (i < pVirtualMemManager->nVirtualMemCount - 1)
294 : {
295 9 : memmove(pVirtualMemManager->pasVirtualMem + i,
296 9 : pVirtualMemManager->pasVirtualMem + i + 1,
297 : sizeof(CPLVirtualMem *) *
298 9 : (pVirtualMemManager->nVirtualMemCount - i - 1));
299 : }
300 17 : pVirtualMemManager->nVirtualMemCount--;
301 17 : break;
302 : }
303 : }
304 17 : CPLReleaseMutex(hVirtualMemManagerMutex);
305 17 : }
306 :
307 : /************************************************************************/
308 : /* CPLVirtualMemNew() */
309 : /************************************************************************/
310 :
311 : static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt);
312 :
313 17 : CPLVirtualMem *CPLVirtualMemNew(size_t nSize, size_t nCacheSize,
314 : size_t nPageSizeHint, int bSingleThreadUsage,
315 : CPLVirtualMemAccessMode eAccessMode,
316 : CPLVirtualMemCachePageCbk pfnCachePage,
317 : CPLVirtualMemUnCachePageCbk pfnUnCachePage,
318 : CPLVirtualMemFreeUserData pfnFreeUserData,
319 : void *pCbkUserData)
320 : {
321 17 : size_t nMinPageSize = CPLGetPageSize();
322 17 : size_t nPageSize = DEFAULT_PAGE_SIZE;
323 :
324 17 : IGNORE_OR_ASSERT_IN_DEBUG(nSize > 0);
325 17 : IGNORE_OR_ASSERT_IN_DEBUG(pfnCachePage != nullptr);
326 :
327 17 : if (nPageSizeHint >= nMinPageSize && nPageSizeHint <= MAXIMUM_PAGE_SIZE)
328 : {
329 5 : if ((nPageSizeHint % nMinPageSize) == 0)
330 5 : nPageSize = nPageSizeHint;
331 : else
332 : {
333 0 : int nbits = 0;
334 0 : nPageSize = static_cast<size_t>(nPageSizeHint);
335 0 : do
336 : {
337 0 : nPageSize >>= 1;
338 0 : nbits++;
339 0 : } while (nPageSize > 0);
340 0 : nPageSize = static_cast<size_t>(1) << (nbits - 1);
341 0 : if (nPageSize < static_cast<size_t>(nPageSizeHint))
342 0 : nPageSize <<= 1;
343 : }
344 : }
345 :
346 17 : if ((nPageSize % nMinPageSize) != 0)
347 0 : nPageSize = nMinPageSize;
348 :
349 17 : if (nCacheSize > nSize)
350 16 : nCacheSize = nSize;
351 1 : else if (nCacheSize == 0)
352 0 : nCacheSize = 1;
353 :
354 17 : int nMappings = 0;
355 :
356 : // Linux specific:
357 : // Count the number of existing memory mappings.
358 17 : FILE *f = fopen("/proc/self/maps", "rb");
359 17 : if (f != nullptr)
360 : {
361 17 : char buffer[80] = {};
362 64966 : while (fgets(buffer, sizeof(buffer), f) != nullptr)
363 64949 : nMappings++;
364 17 : fclose(f);
365 : }
366 :
367 17 : size_t nCacheMaxSizeInPages = 0;
368 : while (true)
369 : {
370 : // /proc/self/maps must not have more than 65K lines.
371 17 : nCacheMaxSizeInPages = (nCacheSize + 2 * nPageSize - 1) / nPageSize;
372 17 : if (nCacheMaxSizeInPages >
373 17 : static_cast<size_t>((MAXIMUM_COUNT_OF_MAPPINGS * 9 / 10) -
374 : nMappings))
375 0 : nPageSize <<= 1;
376 : else
377 17 : break;
378 : }
379 17 : size_t nRoundedMappingSize =
380 17 : ((nSize + 2 * nPageSize - 1) / nPageSize) * nPageSize;
381 17 : void *pData = mmap(nullptr, nRoundedMappingSize, PROT_NONE,
382 : MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
383 17 : if (pData == MAP_FAILED)
384 : {
385 0 : perror("mmap");
386 0 : return nullptr;
387 : }
388 : CPLVirtualMemVMA *ctxt = static_cast<CPLVirtualMemVMA *>(
389 17 : VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMemVMA)));
390 17 : if (ctxt == nullptr)
391 : {
392 0 : munmap(pData, nRoundedMappingSize);
393 0 : return nullptr;
394 : }
395 17 : ctxt->sBase.nRefCount = 1;
396 17 : ctxt->sBase.eType = VIRTUAL_MEM_TYPE_VMA;
397 17 : ctxt->sBase.eAccessMode = eAccessMode;
398 17 : ctxt->sBase.pDataToFree = pData;
399 17 : ctxt->sBase.pData = ALIGN_UP(pData, nPageSize);
400 17 : ctxt->sBase.nPageSize = nPageSize;
401 17 : ctxt->sBase.nSize = nSize;
402 17 : ctxt->sBase.bSingleThreadUsage = CPL_TO_BOOL(bSingleThreadUsage);
403 17 : ctxt->sBase.pfnFreeUserData = pfnFreeUserData;
404 17 : ctxt->sBase.pCbkUserData = pCbkUserData;
405 :
406 17 : ctxt->pabitMappedPages = static_cast<GByte *>(
407 17 : VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
408 17 : if (ctxt->pabitMappedPages == nullptr)
409 : {
410 0 : CPLVirtualMemFreeFileMemoryMapped(ctxt);
411 0 : CPLFree(ctxt);
412 0 : return nullptr;
413 : }
414 17 : ctxt->pabitRWMappedPages = static_cast<GByte *>(
415 17 : VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
416 17 : if (ctxt->pabitRWMappedPages == nullptr)
417 : {
418 0 : CPLVirtualMemFreeFileMemoryMapped(ctxt);
419 0 : CPLFree(ctxt);
420 0 : return nullptr;
421 : }
422 : // Need at least 2 pages in case for a rep movs instruction
423 : // that operate in the view.
424 17 : ctxt->nCacheMaxSizeInPages = static_cast<int>(nCacheMaxSizeInPages);
425 17 : ctxt->panLRUPageIndices = static_cast<int *>(
426 17 : VSI_MALLOC_VERBOSE(ctxt->nCacheMaxSizeInPages * sizeof(int)));
427 17 : if (ctxt->panLRUPageIndices == nullptr)
428 : {
429 0 : CPLVirtualMemFreeFileMemoryMapped(ctxt);
430 0 : CPLFree(ctxt);
431 0 : return nullptr;
432 : }
433 17 : ctxt->iLRUStart = 0;
434 17 : ctxt->nLRUSize = 0;
435 17 : ctxt->iLastPage = -1;
436 17 : ctxt->nRetry = 0;
437 17 : ctxt->pfnCachePage = pfnCachePage;
438 17 : ctxt->pfnUnCachePage = pfnUnCachePage;
439 :
440 : #ifndef HAVE_5ARGS_MREMAP
441 : if (!ctxt->sBase.bSingleThreadUsage)
442 : {
443 : ctxt->hMutexThreadArray = CPLCreateMutex();
444 : IGNORE_OR_ASSERT_IN_DEBUG(ctxt->hMutexThreadArray != nullptr);
445 : CPLReleaseMutex(ctxt->hMutexThreadArray);
446 : ctxt->nThreads = 0;
447 : ctxt->pahThreads = nullptr;
448 : }
449 : #endif
450 :
451 17 : if (!CPLVirtualMemManagerRegisterVirtualMem(ctxt))
452 : {
453 0 : CPLVirtualMemFreeFileMemoryMapped(ctxt);
454 0 : CPLFree(ctxt);
455 0 : return nullptr;
456 : }
457 :
458 17 : return reinterpret_cast<CPLVirtualMem *>(ctxt);
459 : }
460 :
461 : /************************************************************************/
462 : /* CPLVirtualMemFreeFileMemoryMapped() */
463 : /************************************************************************/
464 :
465 17 : static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt)
466 : {
467 17 : CPLVirtualMemManagerUnregisterVirtualMem(ctxt);
468 :
469 17 : size_t nRoundedMappingSize =
470 17 : ((ctxt->sBase.nSize + 2 * ctxt->sBase.nPageSize - 1) /
471 17 : ctxt->sBase.nPageSize) *
472 17 : ctxt->sBase.nPageSize;
473 17 : if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
474 7 : ctxt->pabitRWMappedPages != nullptr && ctxt->pfnUnCachePage != nullptr)
475 : {
476 27 : for (size_t i = 0; i < nRoundedMappingSize / ctxt->sBase.nPageSize; i++)
477 : {
478 20 : if (TEST_BIT(ctxt->pabitRWMappedPages, i))
479 : {
480 13 : void *addr = static_cast<char *>(ctxt->sBase.pData) +
481 13 : i * ctxt->sBase.nPageSize;
482 13 : ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
483 13 : i * ctxt->sBase.nPageSize, addr,
484 : ctxt->sBase.nPageSize,
485 : ctxt->sBase.pCbkUserData);
486 : }
487 : }
488 : }
489 17 : int nRet = munmap(ctxt->sBase.pDataToFree, nRoundedMappingSize);
490 17 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
491 17 : CPLFree(ctxt->pabitMappedPages);
492 17 : CPLFree(ctxt->pabitRWMappedPages);
493 17 : CPLFree(ctxt->panLRUPageIndices);
494 : #ifndef HAVE_5ARGS_MREMAP
495 : if (!ctxt->sBase.bSingleThreadUsage)
496 : {
497 : CPLFree(ctxt->pahThreads);
498 : CPLDestroyMutex(ctxt->hMutexThreadArray);
499 : }
500 : #endif
501 17 : }
502 :
503 : #ifndef HAVE_5ARGS_MREMAP
504 :
505 : static volatile int nCountThreadsInSigUSR1 = 0;
506 : static volatile int nWaitHelperThread = 0;
507 :
508 : /************************************************************************/
509 : /* CPLVirtualMemSIGUSR1Handler() */
510 : /************************************************************************/
511 :
static void CPLVirtualMemSIGUSR1Handler(int /* signum_unused */,
                                        siginfo_t * /* the_info_unused */,
                                        void * /* the_ctxt_unused */)
{
    // Temporary handler installed by CPLVirtualMemAddPage() (no-mremap
    // path): parks the receiving thread until nWaitHelperThread drops to
    // 0, i.e. until the faulted page has been safely updated, so no
    // other consumer thread can observe a half-filled page.
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("entering CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
    // Rouault guesses this is only POSIX correct if it is implemented by an
    // intrinsic.
    CPLAtomicInc(&nCountThreadsInSigUSR1);
    while (nWaitHelperThread)
        // Not explicitly indicated as signal-async-safe, but hopefully ok.
        usleep(1);
    CPLAtomicDec(&nCountThreadsInSigUSR1);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("leaving CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
}
530 : #endif
531 :
532 : /************************************************************************/
533 : /* CPLVirtualMemDeclareThread() */
534 : /************************************************************************/
535 :
536 2 : void CPLVirtualMemDeclareThread(CPLVirtualMem *ctxt)
537 : {
538 2 : if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
539 0 : return;
540 : #ifndef HAVE_5ARGS_MREMAP
541 : CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
542 : IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
543 : CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
544 : ctxtVMA->pahThreads = static_cast<pthread_t *>(CPLRealloc(
545 : ctxtVMA->pahThreads, (ctxtVMA->nThreads + 1) * sizeof(pthread_t)));
546 : ctxtVMA->pahThreads[ctxtVMA->nThreads] = pthread_self();
547 : ctxtVMA->nThreads++;
548 :
549 : CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
550 : #endif
551 : }
552 :
553 : /************************************************************************/
554 : /* CPLVirtualMemUnDeclareThread() */
555 : /************************************************************************/
556 :
557 2 : void CPLVirtualMemUnDeclareThread(CPLVirtualMem *ctxt)
558 : {
559 2 : if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
560 0 : return;
561 : #ifndef HAVE_5ARGS_MREMAP
562 : CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
563 : pthread_t self = pthread_self();
564 : IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
565 : CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
566 : for (int i = 0; i < ctxtVMA->nThreads; i++)
567 : {
568 : if (ctxtVMA->pahThreads[i] == self)
569 : {
570 : if (i < ctxtVMA->nThreads - 1)
571 : memmove(ctxtVMA->pahThreads + i + 1, ctxtVMA->pahThreads + i,
572 : (ctxtVMA->nThreads - 1 - i) * sizeof(pthread_t));
573 : ctxtVMA->nThreads--;
574 : break;
575 : }
576 : }
577 :
578 : CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
579 : #endif
580 : }
581 :
582 : /************************************************************************/
583 : /* CPLVirtualMemGetPageToFill() */
584 : /************************************************************************/
585 :
586 : // Must be paired with CPLVirtualMemAddPage.
587 66667 : static void *CPLVirtualMemGetPageToFill(CPLVirtualMemVMA *ctxt,
588 : void *start_page_addr)
589 : {
590 66667 : void *pPageToFill = nullptr;
591 :
592 66667 : if (ctxt->sBase.bSingleThreadUsage)
593 : {
594 0 : pPageToFill = start_page_addr;
595 0 : const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
596 : PROT_READ | PROT_WRITE);
597 0 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
598 : }
599 : else
600 : {
601 : #ifndef HAVE_5ARGS_MREMAP
602 : CPLAcquireMutex(ctxt->hMutexThreadArray, 1000.0);
603 : if (ctxt->nThreads == 1)
604 : {
605 : pPageToFill = start_page_addr;
606 : const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
607 : PROT_READ | PROT_WRITE);
608 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
609 : }
610 : else
611 : #endif
612 : {
613 : // Allocate a temporary writable page that the user
614 : // callback can fill.
615 : pPageToFill =
616 66667 : mmap(nullptr, ctxt->sBase.nPageSize, PROT_READ | PROT_WRITE,
617 : MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
618 66667 : IGNORE_OR_ASSERT_IN_DEBUG(pPageToFill != MAP_FAILED);
619 : }
620 : }
621 66667 : return pPageToFill;
622 : }
623 :
624 : /************************************************************************/
625 : /* CPLVirtualMemAddPage() */
626 : /************************************************************************/
627 :
static void CPLVirtualMemAddPage(CPLVirtualMemVMA *ctxt, void *target_addr,
                                 void *pPageToFill, OpType opType,
                                 pthread_t hRequesterThread)
{
    // Install the freshly filled page (pPageToFill, obtained from
    // CPLVirtualMemGetPageToFill) at target_addr, evicting the oldest
    // cached page first if the cache is full. opType hints whether the
    // page can be left read-write immediately.
    const int iPage =
        static_cast<int>((static_cast<char *>(target_addr) -
                          static_cast<char *>(ctxt->sBase.pData)) /
                         ctxt->sBase.nPageSize);
    if (ctxt->nLRUSize == ctxt->nCacheMaxSizeInPages)
    {
        // Cache full: evict the page written longest ago into the ring.
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
        fprintfstderr("uncaching page %d\n", iPage);
#endif
        int nOldPage = ctxt->panLRUPageIndices[ctxt->iLRUStart];
        void *addr = static_cast<char *>(ctxt->sBase.pData) +
                     nOldPage * ctxt->sBase.nPageSize;
        if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
            ctxt->pfnUnCachePage != nullptr &&
            TEST_BIT(ctxt->pabitRWMappedPages, nOldPage))
        {
            // Clamp so the last (partial) page does not report bytes
            // beyond the requested mapping size.
            size_t nToBeEvicted = ctxt->sBase.nPageSize;
            if (static_cast<char *>(addr) + nToBeEvicted >=
                static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
                nToBeEvicted = static_cast<char *>(ctxt->sBase.pData) +
                               ctxt->sBase.nSize - static_cast<char *>(addr);

            ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                 nOldPage * ctxt->sBase.nPageSize, addr,
                                 nToBeEvicted, ctxt->sBase.pCbkUserData);
        }
        // "Free" the least recently used page.
        UNSET_BIT(ctxt->pabitMappedPages, nOldPage);
        UNSET_BIT(ctxt->pabitRWMappedPages, nOldPage);
        // Free the old page.
        // Not sure how portable it is to do that that way.
        // Re-mapping PROT_NONE over the old page both releases its
        // physical memory and re-arms faulting for future accesses.
        const void *const pRet =
            mmap(addr, ctxt->sBase.nPageSize, PROT_NONE,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == addr);
        // cppcheck-suppress memleak
    }
    // Record the new page in the FIFO ring of cached pages.
    ctxt->panLRUPageIndices[ctxt->iLRUStart] = iPage;
    ctxt->iLRUStart = (ctxt->iLRUStart + 1) % ctxt->nCacheMaxSizeInPages;
    if (ctxt->nLRUSize < ctxt->nCacheMaxSizeInPages)
    {
        ctxt->nLRUSize++;
    }
    SET_BIT(ctxt->pabitMappedPages, iPage);

    if (ctxt->sBase.bSingleThreadUsage)
    {
        // The page was filled in place (see CPLVirtualMemGetPageToFill):
        // only its protection needs adjusting.
        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            const int nRet =
                mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
    }
    else
    {
#ifdef HAVE_5ARGS_MREMAP
        (void)hRequesterThread;

        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            // Turn the temporary page read-only before remapping it.
            // Only turn it writable when a new fault occurs (and the
            // mapping is writable).
            const int nRet =
                mprotect(pPageToFill, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
        /* Can now remap the pPageToFill onto the target page */
        // Atomic install: other threads either see the old PROT_NONE page
        // (and fault) or the fully filled page, never a partial fill.
        const void *const pRet =
            mremap(pPageToFill, ctxt->sBase.nPageSize, ctxt->sBase.nPageSize,
                   MREMAP_MAYMOVE | MREMAP_FIXED, target_addr);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == target_addr);

#else
        if (ctxt->nThreads > 1)
        {
            /* Pause threads that share this mem view */
            CPLAtomicInc(&nWaitHelperThread);

            /* Install temporary SIGUSR1 signal handler */
            struct sigaction act, oldact;
            act.sa_sigaction = CPLVirtualMemSIGUSR1Handler;
            sigemptyset(&act.sa_mask);
            /* We don't want the sigsegv handler to be called when we are */
            /* running the sigusr1 handler */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaddset(&act.sa_mask, SIGSEGV) == 0);
            act.sa_flags = 0;
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &act, &oldact) == 0);

            for (int i = 0; i < ctxt->nThreads; i++)
            {
                if (ctxt->pahThreads[i] != hRequesterThread)
                {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("stopping thread %X\n", ctxt->pahThreads[i]);
#endif
                    IGNORE_OR_ASSERT_IN_DEBUG(
                        pthread_kill(ctxt->pahThreads[i], SIGUSR1) == 0);
                }
            }

            /* Wait that they are all paused */
            while (nCountThreadsInSigUSR1 != ctxt->nThreads - 1)
                usleep(1);

            /* Restore old SIGUSR1 signal handler */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &oldact, nullptr) ==
                                      0);

            int nRet = mprotect(target_addr, ctxt->sBase.nPageSize,
                                PROT_READ | PROT_WRITE);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("memcpying page %d\n", iPage);
#endif
            // Safe: every other consumer thread is parked in the SIGUSR1
            // handler while the page content is copied in.
            memcpy(target_addr, pPageToFill, ctxt->sBase.nPageSize);

            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else
            {
                nRet = mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
            }

            /* Wake up sleeping threads */
            CPLAtomicDec(&nWaitHelperThread);
            while (nCountThreadsInSigUSR1 != 0)
                usleep(1);

            IGNORE_OR_ASSERT_IN_DEBUG(
                munmap(pPageToFill, ctxt->sBase.nPageSize) == 0);
        }
        else
        {
            // pPageToFill is target_addr itself here (filled in place by
            // CPLVirtualMemGetPageToFill when nThreads == 1).
            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
            {
                const int nRet2 =
                    mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet2 == 0);
            }
        }

        // Release the mutex acquired in CPLVirtualMemGetPageToFill().
        CPLReleaseMutex(ctxt->hMutexThreadArray);
#endif
    }
    // cppcheck-suppress memleak
}
807 :
808 : /************************************************************************/
809 : /* CPLVirtualMemGetOpTypeImm() */
810 : /************************************************************************/
811 :
812 : #if defined(__x86_64__) || defined(__i386__)
813 0 : static OpType CPLVirtualMemGetOpTypeImm(GByte val_rip)
814 : {
815 0 : OpType opType = OP_UNKNOWN;
816 0 : if ((/*val_rip >= 0x00 &&*/ val_rip <= 0x07) ||
817 0 : (val_rip >= 0x40 && val_rip <= 0x47)) // add $, (X)
818 0 : opType = OP_STORE;
819 0 : if ((val_rip >= 0x08 && val_rip <= 0x0f) ||
820 0 : (val_rip >= 0x48 && val_rip <= 0x4f)) // or $, (X)
821 0 : opType = OP_STORE;
822 0 : if ((val_rip >= 0x20 && val_rip <= 0x27) ||
823 0 : (val_rip >= 0x60 && val_rip <= 0x67)) // and $, (X)
824 0 : opType = OP_STORE;
825 0 : if ((val_rip >= 0x28 && val_rip <= 0x2f) ||
826 0 : (val_rip >= 0x68 && val_rip <= 0x6f)) // sub $, (X)
827 0 : opType = OP_STORE;
828 0 : if ((val_rip >= 0x30 && val_rip <= 0x37) ||
829 0 : (val_rip >= 0x70 && val_rip <= 0x77)) // xor $, (X)
830 0 : opType = OP_STORE;
831 0 : if ((val_rip >= 0x38 && val_rip <= 0x3f) ||
832 0 : (val_rip >= 0x78 && val_rip <= 0x7f)) // cmp $, (X)
833 0 : opType = OP_LOAD;
834 0 : return opType;
835 : }
836 : #endif
837 :
838 : /************************************************************************/
839 : /* CPLVirtualMemGetOpType() */
840 : /************************************************************************/
841 :
842 : // Don't need exhaustivity. It is just a hint for an optimization:
843 : // If the fault occurs on a store operation, then we can directly put
844 : // the page in writable mode if the mapping allows it.
845 :
846 : #if defined(__x86_64__) || defined(__i386__)
847 107434 : static OpType CPLVirtualMemGetOpType(const GByte *rip)
848 : {
849 107434 : OpType opType = OP_UNKNOWN;
850 :
851 : #if defined(__x86_64__) || defined(__i386__)
852 107434 : switch (rip[0])
853 : {
854 0 : case 0x00: /* add %al,(%rax) */
855 : case 0x01: /* add %eax,(%rax) */
856 0 : opType = OP_STORE;
857 0 : break;
858 0 : case 0x02: /* add (%rax),%al */
859 : case 0x03: /* add (%rax),%eax */
860 0 : opType = OP_LOAD;
861 0 : break;
862 :
863 0 : case 0x08: /* or %al,(%rax) */
864 : case 0x09: /* or %eax,(%rax) */
865 0 : opType = OP_STORE;
866 0 : break;
867 0 : case 0x0a: /* or (%rax),%al */
868 : case 0x0b: /* or (%rax),%eax */
869 0 : opType = OP_LOAD;
870 0 : break;
871 :
872 107407 : case 0x0f:
873 : {
874 107407 : switch (rip[1])
875 : {
876 107409 : case 0xb6: /* movzbl (%rax),%eax */
877 : case 0xb7: /* movzwl (%rax),%eax */
878 : case 0xbe: /* movsbl (%rax),%eax */
879 : case 0xbf: /* movswl (%rax),%eax */
880 107409 : opType = OP_LOAD;
881 107409 : break;
882 0 : default:
883 0 : break;
884 : }
885 107407 : break;
886 : }
887 8 : case 0xc6: /* movb $,(%rax) */
888 : case 0xc7: /* movl $,(%rax) */
889 8 : opType = OP_STORE;
890 8 : break;
891 :
892 0 : case 0x20: /* and %al,(%rax) */
893 : case 0x21: /* and %eax,(%rax) */
894 0 : opType = OP_STORE;
895 0 : break;
896 0 : case 0x22: /* and (%rax),%al */
897 : case 0x23: /* and (%rax),%eax */
898 0 : opType = OP_LOAD;
899 0 : break;
900 :
901 0 : case 0x28: /* sub %al,(%rax) */
902 : case 0x29: /* sub %eax,(%rax) */
903 0 : opType = OP_STORE;
904 0 : break;
905 0 : case 0x2a: /* sub (%rax),%al */
906 : case 0x2b: /* sub (%rax),%eax */
907 0 : opType = OP_LOAD;
908 0 : break;
909 :
910 0 : case 0x30: /* xor %al,(%rax) */
911 : case 0x31: /* xor %eax,(%rax) */
912 0 : opType = OP_STORE;
913 0 : break;
914 0 : case 0x32: /* xor (%rax),%al */
915 : case 0x33: /* xor (%rax),%eax */
916 0 : opType = OP_LOAD;
917 0 : break;
918 :
919 0 : case 0x38: /* cmp %al,(%rax) */
920 : case 0x39: /* cmp %eax,(%rax) */
921 0 : opType = OP_LOAD;
922 0 : break;
923 0 : case 0x40:
924 : {
925 0 : switch (rip[1])
926 : {
927 0 : case 0x00: /* add %spl,(%rax) */
928 0 : opType = OP_STORE;
929 0 : break;
930 0 : case 0x02: /* add (%rax),%spl */
931 0 : opType = OP_LOAD;
932 0 : break;
933 0 : case 0x28: /* sub %spl,(%rax) */
934 0 : opType = OP_STORE;
935 0 : break;
936 0 : case 0x2a: /* sub (%rax),%spl */
937 0 : opType = OP_LOAD;
938 0 : break;
939 0 : case 0x3a: /* cmp (%rax),%spl */
940 0 : opType = OP_LOAD;
941 0 : break;
942 0 : case 0x8a: /* mov (%rax),%spl */
943 0 : opType = OP_LOAD;
944 0 : break;
945 0 : default:
946 0 : break;
947 : }
948 0 : break;
949 : }
950 : #if defined(__x86_64__)
951 0 : case 0x41: /* reg=%al/%eax, X=%r8 */
952 : case 0x42: /* reg=%al/%eax, X=%rax,%r8,1 */
953 : case 0x43: /* reg=%al/%eax, X=%r8,%r8,1 */
954 : case 0x44: /* reg=%r8b/%r8w, X = %rax */
955 : case 0x45: /* reg=%r8b/%r8w, X = %r8 */
956 : case 0x46: /* reg=%r8b/%r8w, X = %rax,%r8,1 */
957 : case 0x47: /* reg=%r8b/%r8w, X = %r8,%r8,1 */
958 : {
959 0 : switch (rip[1])
960 : {
961 0 : case 0x00: /* add regb,(X) */
962 : case 0x01: /* add regl,(X) */
963 0 : opType = OP_STORE;
964 0 : break;
965 0 : case 0x02: /* add (X),regb */
966 : case 0x03: /* add (X),regl */
967 0 : opType = OP_LOAD;
968 0 : break;
969 0 : case 0x0f:
970 : {
971 0 : switch (rip[2])
972 : {
973 0 : case 0xb6: /* movzbl (X),regl */
974 : case 0xb7: /* movzwl (X),regl */
975 : case 0xbe: /* movsbl (X),regl */
976 : case 0xbf: /* movswl (X),regl */
977 0 : opType = OP_LOAD;
978 0 : break;
979 0 : default:
980 0 : break;
981 : }
982 0 : break;
983 : }
984 0 : case 0x28: /* sub regb,(X) */
985 : case 0x29: /* sub regl,(X) */
986 0 : opType = OP_STORE;
987 0 : break;
988 0 : case 0x2a: /* sub (X),regb */
989 : case 0x2b: /* sub (X),regl */
990 0 : opType = OP_LOAD;
991 0 : break;
992 0 : case 0x38: /* cmp regb,(X) */
993 : case 0x39: /* cmp regl,(X) */
994 0 : opType = OP_LOAD;
995 0 : break;
996 0 : case 0x80: /* cmpb,... $,(X) */
997 : case 0x81: /* cmpl,... $,(X) */
998 : case 0x83: /* cmpl,... $,(X) */
999 0 : opType = CPLVirtualMemGetOpTypeImm(rip[2]);
1000 0 : break;
1001 0 : case 0x88: /* mov regb,(X) */
1002 : case 0x89: /* mov regl,(X) */
1003 0 : opType = OP_STORE;
1004 0 : break;
1005 0 : case 0x8a: /* mov (X),regb */
1006 : case 0x8b: /* mov (X),regl */
1007 0 : opType = OP_LOAD;
1008 0 : break;
1009 0 : case 0xc6: /* movb $,(X) */
1010 : case 0xc7: /* movl $,(X) */
1011 0 : opType = OP_STORE;
1012 0 : break;
1013 0 : case 0x84: /* test %al,(X) */
1014 0 : opType = OP_LOAD;
1015 0 : break;
1016 0 : case 0xf6: /* testb $,(X) or notb (X) */
1017 : case 0xf7: /* testl $,(X) or notl (X)*/
1018 : {
1019 0 : if (rip[2] < 0x10) /* test (X) */
1020 0 : opType = OP_LOAD;
1021 : else /* not (X) */
1022 0 : opType = OP_STORE;
1023 0 : break;
1024 : }
1025 0 : default:
1026 0 : break;
1027 : }
1028 0 : break;
1029 : }
1030 0 : case 0x48: /* reg=%rax, X=%rax or %rax,%rax,1 */
1031 : case 0x49: /* reg=%rax, X=%r8 or %r8,%rax,1 */
1032 : case 0x4a: /* reg=%rax, X=%rax,%r8,1 */
1033 : case 0x4b: /* reg=%rax, X=%r8,%r8,1 */
1034 : case 0x4c: /* reg=%r8, X=%rax or %rax,%rax,1 */
1035 : case 0x4d: /* reg=%r8, X=%r8 or %r8,%rax,1 */
1036 : case 0x4e: /* reg=%r8, X=%rax,%r8,1 */
1037 : case 0x4f: /* reg=%r8, X=%r8,%r8,1 */
1038 : {
1039 0 : switch (rip[1])
1040 : {
1041 0 : case 0x01: /* add reg,(X) */
1042 0 : opType = OP_STORE;
1043 0 : break;
1044 0 : case 0x03: /* add (X),reg */
1045 0 : opType = OP_LOAD;
1046 0 : break;
1047 :
1048 0 : case 0x09: /* or reg,(%rax) */
1049 0 : opType = OP_STORE;
1050 0 : break;
1051 0 : case 0x0b: /* or (%rax),reg */
1052 0 : opType = OP_LOAD;
1053 0 : break;
1054 0 : case 0x0f:
1055 : {
1056 0 : switch (rip[2])
1057 : {
1058 0 : case 0xc3: /* movnti reg,(X) */
1059 0 : opType = OP_STORE;
1060 0 : break;
1061 0 : default:
1062 0 : break;
1063 : }
1064 0 : break;
1065 : }
1066 0 : case 0x21: /* and reg,(X) */
1067 0 : opType = OP_STORE;
1068 0 : break;
1069 0 : case 0x23: /* and (X),reg */
1070 0 : opType = OP_LOAD;
1071 0 : break;
1072 :
1073 0 : case 0x29: /* sub reg,(X) */
1074 0 : opType = OP_STORE;
1075 0 : break;
1076 0 : case 0x2b: /* sub (X),reg */
1077 0 : opType = OP_LOAD;
1078 0 : break;
1079 :
1080 0 : case 0x31: /* xor reg,(X) */
1081 0 : opType = OP_STORE;
1082 0 : break;
1083 0 : case 0x33: /* xor (X),reg */
1084 0 : opType = OP_LOAD;
1085 0 : break;
1086 :
1087 0 : case 0x39: /* cmp reg,(X) */
1088 0 : opType = OP_LOAD;
1089 0 : break;
1090 :
1091 0 : case 0x81:
1092 : case 0x83:
1093 0 : opType = CPLVirtualMemGetOpTypeImm(rip[2]);
1094 0 : break;
1095 :
1096 0 : case 0x85: /* test reg,(X) */
1097 0 : opType = OP_LOAD;
1098 0 : break;
1099 :
1100 0 : case 0x89: /* mov reg,(X) */
1101 0 : opType = OP_STORE;
1102 0 : break;
1103 0 : case 0x8b: /* mov (X),reg */
1104 0 : opType = OP_LOAD;
1105 0 : break;
1106 :
1107 0 : case 0xc7: /* movq $,(X) */
1108 0 : opType = OP_STORE;
1109 0 : break;
1110 :
1111 0 : case 0xf7:
1112 : {
1113 0 : if (rip[2] < 0x10) /* testq $,(X) */
1114 0 : opType = OP_LOAD;
1115 : else /* notq (X) */
1116 0 : opType = OP_STORE;
1117 0 : break;
1118 : }
1119 0 : default:
1120 0 : break;
1121 : }
1122 0 : break;
1123 : }
1124 : #endif
1125 0 : case 0x66:
1126 : {
1127 0 : switch (rip[1])
1128 : {
1129 0 : case 0x01: /* add %ax,(%rax) */
1130 0 : opType = OP_STORE;
1131 0 : break;
1132 0 : case 0x03: /* add (%rax),%ax */
1133 0 : opType = OP_LOAD;
1134 0 : break;
1135 0 : case 0x0f:
1136 : {
1137 0 : switch (rip[2])
1138 : {
1139 0 : case 0x2e: /* ucomisd (%rax),%xmm0 */
1140 0 : opType = OP_LOAD;
1141 0 : break;
1142 0 : case 0x6f: /* movdqa (%rax),%xmm0 */
1143 0 : opType = OP_LOAD;
1144 0 : break;
1145 0 : case 0x7f: /* movdqa %xmm0,(%rax) */
1146 0 : opType = OP_STORE;
1147 0 : break;
1148 0 : case 0xb6: /* movzbw (%rax),%ax */
1149 0 : opType = OP_LOAD;
1150 0 : break;
1151 0 : case 0xe7: /* movntdq %xmm0,(%rax) */
1152 0 : opType = OP_STORE;
1153 0 : break;
1154 0 : default:
1155 0 : break;
1156 : }
1157 0 : break;
1158 : }
1159 0 : case 0x29: /* sub %ax,(%rax) */
1160 0 : opType = OP_STORE;
1161 0 : break;
1162 0 : case 0x2b: /* sub (%rax),%ax */
1163 0 : opType = OP_LOAD;
1164 0 : break;
1165 0 : case 0x39: /* cmp %ax,(%rax) */
1166 0 : opType = OP_LOAD;
1167 0 : break;
1168 : #if defined(__x86_64__)
1169 0 : case 0x41: /* reg = %ax (or %xmm0), X = %r8 */
1170 : case 0x42: /* reg = %ax (or %xmm0), X = %rax,%r8,1 */
1171 : case 0x43: /* reg = %ax (or %xmm0), X = %r8,%r8,1 */
1172 : case 0x44: /* reg = %r8w (or %xmm8), X = %rax */
1173 : case 0x45: /* reg = %r8w (or %xmm8), X = %r8 */
1174 : case 0x46: /* reg = %r8w (or %xmm8), X = %rax,%r8,1 */
1175 : case 0x47: /* reg = %r8w (or %xmm8), X = %r8,%r8,1 */
1176 : {
1177 0 : switch (rip[2])
1178 : {
1179 0 : case 0x01: /* add reg,(X) */
1180 0 : opType = OP_STORE;
1181 0 : break;
1182 0 : case 0x03: /* add (X),reg */
1183 0 : opType = OP_LOAD;
1184 0 : break;
1185 0 : case 0x0f:
1186 : {
1187 0 : switch (rip[3])
1188 : {
1189 0 : case 0x2e: /* ucomisd (X),reg */
1190 0 : opType = OP_LOAD;
1191 0 : break;
1192 0 : case 0x6f: /* movdqa (X),reg */
1193 0 : opType = OP_LOAD;
1194 0 : break;
1195 0 : case 0x7f: /* movdqa reg,(X) */
1196 0 : opType = OP_STORE;
1197 0 : break;
1198 0 : case 0xb6: /* movzbw (X),reg */
1199 0 : opType = OP_LOAD;
1200 0 : break;
1201 0 : case 0xe7: /* movntdq reg,(X) */
1202 0 : opType = OP_STORE;
1203 0 : break;
1204 0 : default:
1205 0 : break;
1206 : }
1207 0 : break;
1208 : }
1209 0 : case 0x29: /* sub reg,(X) */
1210 0 : opType = OP_STORE;
1211 0 : break;
1212 0 : case 0x2b: /* sub (X),reg */
1213 0 : opType = OP_LOAD;
1214 0 : break;
1215 0 : case 0x39: /* cmp reg,(X) */
1216 0 : opType = OP_LOAD;
1217 0 : break;
1218 0 : case 0x81: /* cmpw,... $,(X) */
1219 : case 0x83: /* cmpw,... $,(X) */
1220 0 : opType = CPLVirtualMemGetOpTypeImm(rip[3]);
1221 0 : break;
1222 0 : case 0x85: /* test reg,(X) */
1223 0 : opType = OP_LOAD;
1224 0 : break;
1225 0 : case 0x89: /* mov reg,(X) */
1226 0 : opType = OP_STORE;
1227 0 : break;
1228 0 : case 0x8b: /* mov (X),reg */
1229 0 : opType = OP_LOAD;
1230 0 : break;
1231 0 : case 0xc7: /* movw $,(X) */
1232 0 : opType = OP_STORE;
1233 0 : break;
1234 0 : case 0xf7:
1235 : {
1236 0 : if (rip[3] < 0x10) /* testw $,(X) */
1237 0 : opType = OP_LOAD;
1238 : else /* notw (X) */
1239 0 : opType = OP_STORE;
1240 0 : break;
1241 : }
1242 0 : default:
1243 0 : break;
1244 : }
1245 0 : break;
1246 : }
1247 : #endif
1248 0 : case 0x81: /* cmpw,... $,(%rax) */
1249 : case 0x83: /* cmpw,... $,(%rax) */
1250 0 : opType = CPLVirtualMemGetOpTypeImm(rip[2]);
1251 0 : break;
1252 :
1253 0 : case 0x85: /* test %ax,(%rax) */
1254 0 : opType = OP_LOAD;
1255 0 : break;
1256 0 : case 0x89: /* mov %ax,(%rax) */
1257 0 : opType = OP_STORE;
1258 0 : break;
1259 0 : case 0x8b: /* mov (%rax),%ax */
1260 0 : opType = OP_LOAD;
1261 0 : break;
1262 0 : case 0xc7: /* movw $,(%rax) */
1263 0 : opType = OP_STORE;
1264 0 : break;
1265 0 : case 0xf3:
1266 : {
1267 0 : switch (rip[2])
1268 : {
1269 0 : case 0xa5: /* rep movsw %ds:(%rsi),%es:(%rdi) */
1270 0 : opType = OP_MOVS_RSI_RDI;
1271 0 : break;
1272 0 : default:
1273 0 : break;
1274 : }
1275 0 : break;
1276 : }
1277 0 : case 0xf7: /* testw $,(%rax) or notw (%rax) */
1278 : {
1279 0 : if (rip[2] < 0x10) /* test */
1280 0 : opType = OP_LOAD;
1281 : else /* not */
1282 0 : opType = OP_STORE;
1283 0 : break;
1284 : }
1285 0 : default:
1286 0 : break;
1287 : }
1288 0 : break;
1289 : }
1290 0 : case 0x80: /* cmpb,... $,(%rax) */
1291 : case 0x81: /* cmpl,... $,(%rax) */
1292 : case 0x83: /* cmpl,... $,(%rax) */
1293 0 : opType = CPLVirtualMemGetOpTypeImm(rip[1]);
1294 0 : break;
1295 0 : case 0x84: /* test %al,(%rax) */
1296 : case 0x85: /* test %eax,(%rax) */
1297 0 : opType = OP_LOAD;
1298 0 : break;
1299 0 : case 0x88: /* mov %al,(%rax) */
1300 0 : opType = OP_STORE;
1301 0 : break;
1302 0 : case 0x89: /* mov %eax,(%rax) */
1303 0 : opType = OP_STORE;
1304 0 : break;
1305 0 : case 0x8a: /* mov (%rax),%al */
1306 0 : opType = OP_LOAD;
1307 0 : break;
1308 0 : case 0x8b: /* mov (%rax),%eax */
1309 0 : opType = OP_LOAD;
1310 0 : break;
1311 0 : case 0xd9: /* 387 float */
1312 : {
1313 0 : if (rip[1] < 0x08) /* flds (%eax) */
1314 0 : opType = OP_LOAD;
1315 0 : else if (rip[1] >= 0x18 && rip[1] <= 0x20) /* fstps (%eax) */
1316 0 : opType = OP_STORE;
1317 0 : break;
1318 : }
1319 0 : case 0xf2: /* SSE 2 */
1320 : {
1321 0 : switch (rip[1])
1322 : {
1323 0 : case 0x0f:
1324 : {
1325 0 : switch (rip[2])
1326 : {
1327 0 : case 0x10: /* movsd (%rax),%xmm0 */
1328 0 : opType = OP_LOAD;
1329 0 : break;
1330 0 : case 0x11: /* movsd %xmm0,(%rax) */
1331 0 : opType = OP_STORE;
1332 0 : break;
1333 0 : case 0x58: /* addsd (%rax),%xmm0 */
1334 0 : opType = OP_LOAD;
1335 0 : break;
1336 0 : case 0x59: /* mulsd (%rax),%xmm0 */
1337 0 : opType = OP_LOAD;
1338 0 : break;
1339 0 : case 0x5c: /* subsd (%rax),%xmm0 */
1340 0 : opType = OP_LOAD;
1341 0 : break;
1342 0 : case 0x5e: /* divsd (%rax),%xmm0 */
1343 0 : opType = OP_LOAD;
1344 0 : break;
1345 0 : default:
1346 0 : break;
1347 : }
1348 0 : break;
1349 : }
1350 : #if defined(__x86_64__)
1351 0 : case 0x41: /* reg=%xmm0, X=%r8 or %r8,%rax,1 */
1352 : case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
1353 : case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
1354 : case 0x44: /* reg=%xmm8, X=%rax or %rax,%rax,1*/
1355 : case 0x45: /* reg=%xmm8, X=%r8 or %r8,%rax,1 */
1356 : case 0x46: /* reg=%xmm8, X=%rax,%r8,1 */
1357 : case 0x47: /* reg=%xmm8, X=%r8,%r8,1 */
1358 : {
1359 0 : switch (rip[2])
1360 : {
1361 0 : case 0x0f:
1362 : {
1363 0 : switch (rip[3])
1364 : {
1365 0 : case 0x10: /* movsd (X),reg */
1366 0 : opType = OP_LOAD;
1367 0 : break;
1368 0 : case 0x11: /* movsd reg,(X) */
1369 0 : opType = OP_STORE;
1370 0 : break;
1371 0 : case 0x58: /* addsd (X),reg */
1372 0 : opType = OP_LOAD;
1373 0 : break;
1374 0 : case 0x59: /* mulsd (X),reg */
1375 0 : opType = OP_LOAD;
1376 0 : break;
1377 0 : case 0x5c: /* subsd (X),reg */
1378 0 : opType = OP_LOAD;
1379 0 : break;
1380 0 : case 0x5e: /* divsd (X),reg */
1381 0 : opType = OP_LOAD;
1382 0 : break;
1383 0 : default:
1384 0 : break;
1385 : }
1386 0 : break;
1387 : }
1388 0 : default:
1389 0 : break;
1390 : }
1391 0 : break;
1392 : }
1393 : #endif
1394 0 : default:
1395 0 : break;
1396 : }
1397 0 : break;
1398 : }
1399 6 : case 0xf3:
1400 : {
1401 6 : switch (rip[1])
1402 : {
1403 0 : case 0x0f: /* SSE 2 */
1404 : {
1405 0 : switch (rip[2])
1406 : {
1407 0 : case 0x10: /* movss (%rax),%xmm0 */
1408 0 : opType = OP_LOAD;
1409 0 : break;
1410 0 : case 0x11: /* movss %xmm0,(%rax) */
1411 0 : opType = OP_STORE;
1412 0 : break;
1413 0 : case 0x6f: /* movdqu (%rax),%xmm0 */
1414 0 : opType = OP_LOAD;
1415 0 : break;
1416 0 : case 0x7f: /* movdqu %xmm0,(%rax) */
1417 0 : opType = OP_STORE;
1418 0 : break;
1419 0 : default:
1420 0 : break;
1421 : }
1422 0 : break;
1423 : }
1424 : #if defined(__x86_64__)
1425 4 : case 0x41: /* reg=%xmm0, X=%r8 */
1426 : case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
1427 : case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
1428 : case 0x44: /* reg=%xmm8, X = %rax */
1429 : case 0x45: /* reg=%xmm8, X = %r8 */
1430 : case 0x46: /* reg=%xmm8, X = %rax,%r8,1 */
1431 : case 0x47: /* reg=%xmm8, X = %r8,%r8,1 */
1432 : {
1433 4 : switch (rip[2])
1434 : {
1435 4 : case 0x0f: /* SSE 2 */
1436 : {
1437 4 : switch (rip[3])
1438 : {
1439 0 : case 0x10: /* movss (X),reg */
1440 0 : opType = OP_LOAD;
1441 0 : break;
1442 0 : case 0x11: /* movss reg,(X) */
1443 0 : opType = OP_STORE;
1444 0 : break;
1445 4 : case 0x6f: /* movdqu (X),reg */
1446 4 : opType = OP_LOAD;
1447 4 : break;
1448 0 : case 0x7f: /* movdqu reg,(X) */
1449 0 : opType = OP_STORE;
1450 0 : break;
1451 0 : default:
1452 0 : break;
1453 : }
1454 4 : break;
1455 : }
1456 0 : default:
1457 0 : break;
1458 : }
1459 4 : break;
1460 : }
1461 0 : case 0x48:
1462 : {
1463 0 : switch (rip[2])
1464 : {
1465 0 : case 0xa5: /* rep movsq %ds:(%rsi),%es:(%rdi) */
1466 0 : opType = OP_MOVS_RSI_RDI;
1467 0 : break;
1468 0 : default:
1469 0 : break;
1470 : }
1471 0 : break;
1472 : }
1473 : #endif
1474 0 : case 0xa4: /* rep movsb %ds:(%rsi),%es:(%rdi) */
1475 : case 0xa5: /* rep movsl %ds:(%rsi),%es:(%rdi) */
1476 0 : opType = OP_MOVS_RSI_RDI;
1477 0 : break;
1478 0 : case 0xa6: /* repz cmpsb %es:(%rdi),%ds:(%rsi) */
1479 0 : opType = OP_LOAD;
1480 0 : break;
1481 2 : default:
1482 2 : break;
1483 : }
1484 6 : break;
1485 : }
1486 0 : case 0xf6: /* testb $,(%rax) or notb (%rax) */
1487 : case 0xf7: /* testl $,(%rax) or notl (%rax) */
1488 : {
1489 0 : if (rip[1] < 0x10) /* test */
1490 0 : opType = OP_LOAD;
1491 : else /* not */
1492 0 : opType = OP_STORE;
1493 0 : break;
1494 : }
1495 13 : default:
1496 13 : break;
1497 : }
1498 : #endif
1499 107434 : return opType;
1500 : }
1501 : #endif
1502 :
1503 : /************************************************************************/
1504 : /* CPLVirtualMemManagerPinAddrInternal() */
1505 : /************************************************************************/
1506 :
1507 : static int
1508 107432 : CPLVirtualMemManagerPinAddrInternal(CPLVirtualMemMsgToWorkerThread *msg)
1509 : {
1510 107432 : char wait_ready = '\0';
1511 107432 : char response_buf[4] = {};
1512 :
1513 : // Wait for the helper thread to be ready to process another request.
1514 : while (true)
1515 : {
1516 : const int ret = static_cast<int>(
1517 107432 : read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1));
1518 107437 : if (ret < 0 && errno == EINTR)
1519 : {
1520 : // NOP
1521 : }
1522 : else
1523 : {
1524 107437 : IGNORE_OR_ASSERT_IN_DEBUG(ret == 1);
1525 107437 : break;
1526 : }
1527 0 : }
1528 :
1529 : // Pass the address that caused the fault to the helper thread.
1530 : const ssize_t nRetWrite =
1531 107437 : write(pVirtualMemManager->pipefd_to_thread[1], msg, sizeof(*msg));
1532 107437 : IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(*msg));
1533 :
1534 : // Wait that the helper thread has fixed the fault.
1535 : while (true)
1536 : {
1537 : const int ret = static_cast<int>(
1538 107437 : read(pVirtualMemManager->pipefd_from_thread[0], response_buf, 4));
1539 107437 : if (ret < 0 && errno == EINTR)
1540 : {
1541 : // NOP
1542 : }
1543 : else
1544 : {
1545 107437 : IGNORE_OR_ASSERT_IN_DEBUG(ret == 4);
1546 107437 : break;
1547 : }
1548 0 : }
1549 :
1550 : // In case the helper thread did not recognize the address as being
1551 : // one that it should take care of, just rely on the previous SIGSEGV
1552 : // handler (with might abort the process).
1553 107437 : return (memcmp(response_buf, MAPPING_FOUND, 4) == 0);
1554 : }
1555 :
1556 : /************************************************************************/
1557 : /* CPLVirtualMemPin() */
1558 : /************************************************************************/
1559 :
1560 0 : void CPLVirtualMemPin(CPLVirtualMem *ctxt, void *pAddr, size_t nSize,
1561 : int bWriteOp)
1562 : {
1563 0 : if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
1564 0 : return;
1565 :
1566 : CPLVirtualMemMsgToWorkerThread msg;
1567 :
1568 0 : memset(&msg, 0, sizeof(msg));
1569 0 : msg.hRequesterThread = pthread_self();
1570 0 : msg.opType = (bWriteOp) ? OP_STORE : OP_LOAD;
1571 :
1572 0 : char *pBase = reinterpret_cast<char *>(ALIGN_DOWN(pAddr, ctxt->nPageSize));
1573 0 : const size_t n = (reinterpret_cast<char *>(pAddr) - pBase + nSize +
1574 0 : ctxt->nPageSize - 1) /
1575 0 : ctxt->nPageSize;
1576 0 : for (size_t i = 0; i < n; i++)
1577 : {
1578 0 : msg.pFaultAddr = reinterpret_cast<char *>(pBase) + i * ctxt->nPageSize;
1579 0 : CPLVirtualMemManagerPinAddrInternal(&msg);
1580 : }
1581 : }
1582 :
1583 : /************************************************************************/
1584 : /* CPLVirtualMemManagerSIGSEGVHandler() */
1585 : /************************************************************************/
1586 :
1587 : #if defined(__x86_64__)
1588 : #define REG_IP REG_RIP
1589 : #define REG_SI REG_RSI
1590 : #define REG_DI REG_RDI
1591 : #elif defined(__i386__)
1592 : #define REG_IP REG_EIP
1593 : #define REG_SI REG_ESI
1594 : #define REG_DI REG_EDI
1595 : #endif
1596 :
1597 : // Must take care of only using "asynchronous-signal-safe" functions in a signal
1598 : // handler pthread_self(), read() and write() are such. See:
1599 : // https://www.securecoding.cert.org/confluence/display/seccode/SIG30-C.+Call+only+asynchronous-safe+functions+within+signal+handlers
// SIGSEGV handler: classifies the faulting access (load vs store) by
// decoding the instruction at the fault address, then delegates the actual
// page mapping to the helper thread via CPLVirtualMemManagerPinAddrInternal().
static void CPLVirtualMemManagerSIGSEGVHandler(int the_signal,
                                               siginfo_t *the_info,
                                               void *the_ctxt)
{
    CPLVirtualMemMsgToWorkerThread msg;

    memset(&msg, 0, sizeof(msg));
    msg.pFaultAddr = the_info->si_addr;
    msg.hRequesterThread = pthread_self();

#if defined(__x86_64__) || defined(__i386__)
    // Decode the instruction pointed to by RIP/EIP to decide whether the
    // faulting access was a read or a write.
    ucontext_t *the_ucontext = static_cast<ucontext_t *>(the_ctxt);
    const GByte *rip = reinterpret_cast<const GByte *>(
        the_ucontext->uc_mcontext.gregs[REG_IP]);
    msg.opType = CPLVirtualMemGetOpType(rip);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("at rip %p, bytes: %02x %02x %02x %02x\n", rip, rip[0],
                  rip[1], rip[2], rip[3]);
#endif
    // A "rep movs" instruction both reads (%rsi) and writes (%rdi): decide
    // load vs store by comparing the fault address against the two registers.
    if (msg.opType == OP_MOVS_RSI_RDI)
    {
        void *rsi =
            reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_SI]);
        void *rdi =
            reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_DI]);

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
        fprintfstderr("fault=%p rsi=%p rsi=%p\n", msg.pFaultAddr, rsi, rdi);
#endif
        if (msg.pFaultAddr == rsi)
        {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("load\n");
#endif
            msg.opType = OP_LOAD;
        }
        else if (msg.pFaultAddr == rdi)
        {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("store\n");
#endif
            msg.opType = OP_STORE;
        }
    }
#ifdef DEBUG_VIRTUALMEM
    else if (msg.opType == OP_UNKNOWN)
    {
        // Log unrecognized opcodes once, so the decoder table can be extended.
        static bool bHasWarned = false;
        if (!bHasWarned)
        {
            bHasWarned = true;
            fprintfstderr("at rip %p, unknown bytes: %02x %02x %02x %02x\n",
                          rip, rip[0], rip[1], rip[2], rip[3]);
        }
    }
#endif
#else
    // No instruction decoder for this architecture.
    msg.opType = OP_UNKNOWN;
#endif

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("entering handler for %X (addr=%p)\n", pthread_self(),
                  the_info->si_addr);
#endif

    // Only handle protection faults (SEGV_ACCERR); anything else (e.g. an
    // access to an unmapped address) goes to the previous handler.
    if (the_info->si_code != SEGV_ACCERR)
    {
        pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
        return;
    }

    if (!CPLVirtualMemManagerPinAddrInternal(&msg))
    {
        // In case the helper thread did not recognize the address as being
        // one that it should take care of, just rely on the previous SIGSEGV
        // handler (with might abort the process).
        pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
    }

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("leaving handler for %X (addr=%p)\n", pthread_self(),
                  the_info->si_addr);
#endif
}
1684 :
1685 : /************************************************************************/
1686 : /* CPLVirtualMemManagerThread() */
1687 : /************************************************************************/
1688 :
// Body of the helper thread that services page faults forwarded by the
// SIGSEGV handler: it locates the mapping owning the faulting address,
// fills/remaps the page (or upgrades it to read-write), and acknowledges
// the requester over a pipe. Terminates when it receives BYEBYE_ADDR.
static void CPLVirtualMemManagerThread(void * /* unused_param */)
{
    while (true)
    {
        char i_m_ready = 1;
        CPLVirtualMemVMA *ctxt = nullptr;
        bool bMappingFound = false;
        CPLVirtualMemMsgToWorkerThread msg;

        // Signal that we are ready to process a new request.
        ssize_t nRetWrite =
            write(pVirtualMemManager->pipefd_wait_thread[1], &i_m_ready, 1);
        IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 1);

        // Fetch the address to process.
        const ssize_t nRetRead =
            read(pVirtualMemManager->pipefd_to_thread[0], &msg, sizeof(msg));
        IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == sizeof(msg));

        // If CPLVirtualMemManagerTerminate() is called, it will use BYEBYE_ADDR
        // as a means to ask for our termination.
        if (msg.pFaultAddr == BYEBYE_ADDR)
            break;

        /* Lookup for a mapping that contains addr */
        CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
        for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
        {
            ctxt = pVirtualMemManager->pasVirtualMem[i];
            if (static_cast<char *>(msg.pFaultAddr) >=
                    static_cast<char *>(ctxt->sBase.pData) &&
                static_cast<char *>(msg.pFaultAddr) <
                    static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
            {
                bMappingFound = true;
                break;
            }
        }
        CPLReleaseMutex(hVirtualMemManagerMutex);

        if (bMappingFound)
        {
            // Page-align the fault address and compute the page index within
            // the mapping.
            char *const start_page_addr = static_cast<char *>(
                ALIGN_DOWN(msg.pFaultAddr, ctxt->sBase.nPageSize));
            const int iPage =
                static_cast<int>((static_cast<char *>(start_page_addr) -
                                  static_cast<char *>(ctxt->sBase.pData)) /
                                 ctxt->sBase.nPageSize);

            if (iPage == ctxt->iLastPage)
            {
                // In case 2 threads try to access the same page concurrently it
                // is possible that we are asked to mapped the page again
                // whereas it is always mapped. However, if that number of
                // successive retries is too high, this is certainly a sign that
                // something else happen, like trying to write-access a
                // read-only page 100 is a bit of magic number. Rouault believes
                // it must be at least the number of concurrent threads. 100
                // seems to be really safe!
                ctxt->nRetry++;
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                fprintfstderr("retry on page %d : %d\n", iPage, ctxt->nRetry);
#endif
                if (ctxt->nRetry >= 100)
                {
                    CPLError(CE_Failure, CPLE_AppDefined,
                             "CPLVirtualMemManagerThread: trying to "
                             "write into read-only mapping");
                    nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                                      MAPPING_NOT_FOUND, 4);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
                    break;
                }
                else if (msg.opType != OP_LOAD &&
                         ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                         !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                {
                    // Write access to a page currently mapped read-only:
                    // upgrade it in place.
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("switching page %d to write mode\n", iPage);
#endif
                    SET_BIT(ctxt->pabitRWMappedPages, iPage);
                    const int nRet =
                        mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                 PROT_READ | PROT_WRITE);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                }
            }
            else
            {
                ctxt->iLastPage = iPage;
                ctxt->nRetry = 0;

                if (TEST_BIT(ctxt->pabitMappedPages, iPage))
                {
                    // Page already resident: the only legitimate reason to
                    // fault on it is a write to a read-only mapping of it.
                    if (msg.opType != OP_LOAD &&
                        ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                        !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("switching page %d to write mode\n",
                                      iPage);
#endif
                        SET_BIT(ctxt->pabitRWMappedPages, iPage);
                        const int nRet =
                            mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                     PROT_READ | PROT_WRITE);
                        IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                    }
                    else
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("unexpected case for page %d\n", iPage);
#endif
                    }
                }
                else
                {
                    // Page not yet resident: ask the user callback to fill
                    // it, then map it at its target address.
                    void *const pPageToFill =
                        CPLVirtualMemGetPageToFill(ctxt, start_page_addr);

                    // The last page of the mapping may be partial.
                    size_t nToFill = ctxt->sBase.nPageSize;
                    if (start_page_addr + nToFill >=
                        static_cast<char *>(ctxt->sBase.pData) +
                            ctxt->sBase.nSize)
                    {
                        nToFill = static_cast<char *>(ctxt->sBase.pData) +
                                  ctxt->sBase.nSize - start_page_addr;
                    }

                    ctxt->pfnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                       start_page_addr - static_cast<char *>(
                                                             ctxt->sBase.pData),
                                       pPageToFill, nToFill,
                                       ctxt->sBase.pCbkUserData);

                    // Now remap this page to its target address and
                    // register it in the LRU.
                    CPLVirtualMemAddPage(ctxt, start_page_addr, pPageToFill,
                                         msg.opType, msg.hRequesterThread);
                }
            }

            // Warn the segfault handler that we have finished our job.
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
        else
        {
            // Warn the segfault handler that we have finished our job
            // but that the fault didn't occur in a memory range that
            // is under our responsibility.
            CPLError(CE_Failure, CPLE_AppDefined,
                     "CPLVirtualMemManagerThread: no mapping found");
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_NOT_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
    }
}
1849 :
1850 : /************************************************************************/
1851 : /* CPLVirtualMemManagerInit() */
1852 : /************************************************************************/
1853 :
1854 17 : static bool CPLVirtualMemManagerInit()
1855 : {
1856 34 : CPLMutexHolderD(&hVirtualMemManagerMutex);
1857 17 : if (pVirtualMemManager != nullptr)
1858 15 : return true;
1859 :
1860 : struct sigaction act;
1861 2 : pVirtualMemManager = static_cast<CPLVirtualMemManager *>(
1862 2 : VSI_MALLOC_VERBOSE(sizeof(CPLVirtualMemManager)));
1863 2 : if (pVirtualMemManager == nullptr)
1864 0 : return false;
1865 2 : pVirtualMemManager->pasVirtualMem = nullptr;
1866 2 : pVirtualMemManager->nVirtualMemCount = 0;
1867 2 : int nRet = pipe(pVirtualMemManager->pipefd_to_thread);
1868 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1869 2 : nRet = pipe(pVirtualMemManager->pipefd_from_thread);
1870 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1871 2 : nRet = pipe(pVirtualMemManager->pipefd_wait_thread);
1872 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1873 :
1874 : // Install our custom SIGSEGV handler.
1875 2 : act.sa_sigaction = CPLVirtualMemManagerSIGSEGVHandler;
1876 2 : sigemptyset(&act.sa_mask);
1877 2 : act.sa_flags = SA_SIGINFO;
1878 2 : nRet = sigaction(SIGSEGV, &act, &pVirtualMemManager->oldact);
1879 2 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1880 :
1881 : // Starts the helper thread.
1882 4 : pVirtualMemManager->hHelperThread =
1883 2 : CPLCreateJoinableThread(CPLVirtualMemManagerThread, nullptr);
1884 2 : if (pVirtualMemManager->hHelperThread == nullptr)
1885 : {
1886 0 : VSIFree(pVirtualMemManager);
1887 0 : pVirtualMemManager = nullptr;
1888 0 : return false;
1889 : }
1890 2 : return true;
1891 : }
1892 :
1893 : /************************************************************************/
1894 : /* CPLVirtualMemManagerTerminate() */
1895 : /************************************************************************/
1896 :
// Tears down the virtual memory manager singleton: asks the helper thread
// to exit (via the BYEBYE_ADDR sentinel), joins it, frees remaining
// mappings, closes the pipes and restores the previous SIGSEGV handler.
// No-op if the manager was never initialized.
void CPLVirtualMemManagerTerminate(void)
{
    if (pVirtualMemManager == nullptr)
        return;

    // BYEBYE_ADDR is the sentinel fault address that makes
    // CPLVirtualMemManagerThread() break out of its loop.
    CPLVirtualMemMsgToWorkerThread msg;
    msg.pFaultAddr = BYEBYE_ADDR;
    msg.opType = OP_UNKNOWN;
    memset(&msg.hRequesterThread, 0, sizeof(msg.hRequesterThread));

    // Wait for the helper thread to be ready.
    char wait_ready;
    const ssize_t nRetRead =
        read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1);
    IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == 1);

    // Ask it to terminate.
    const ssize_t nRetWrite =
        write(pVirtualMemManager->pipefd_to_thread[1], &msg, sizeof(msg));
    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(msg));

    // Wait for its termination.
    CPLJoinThread(pVirtualMemManager->hHelperThread);

    // Cleanup everything.
    while (pVirtualMemManager->nVirtualMemCount > 0)
        CPLVirtualMemFree(reinterpret_cast<CPLVirtualMem *>(
            pVirtualMemManager
                ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount - 1]));
    CPLFree(pVirtualMemManager->pasVirtualMem);

    close(pVirtualMemManager->pipefd_to_thread[0]);
    close(pVirtualMemManager->pipefd_to_thread[1]);
    close(pVirtualMemManager->pipefd_from_thread[0]);
    close(pVirtualMemManager->pipefd_from_thread[1]);
    close(pVirtualMemManager->pipefd_wait_thread[0]);
    close(pVirtualMemManager->pipefd_wait_thread[1]);

    // Restore previous handler.
    sigaction(SIGSEGV, &pVirtualMemManager->oldact, nullptr);

    CPLFree(pVirtualMemManager);
    pVirtualMemManager = nullptr;

    CPLDestroyMutex(hVirtualMemManagerMutex);
    hVirtualMemManagerMutex = nullptr;
}
1944 :
1945 : #else // HAVE_VIRTUAL_MEM_VMA
1946 :
// Stub used when HAVE_VIRTUAL_MEM_VMA is not defined: the VMA-backed
// virtual memory feature is unavailable, so report an error and return
// nullptr.
CPLVirtualMem *CPLVirtualMemNew(
    size_t /* nSize */, size_t /* nCacheSize */, size_t /* nPageSizeHint */,
    int /* bSingleThreadUsage */, CPLVirtualMemAccessMode /* eAccessMode */,
    CPLVirtualMemCachePageCbk /* pfnCachePage */,
    CPLVirtualMemUnCachePageCbk /* pfnUnCachePage */,
    CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
{
    CPLError(CE_Failure, CPLE_NotSupported,
             "CPLVirtualMemNew() unsupported on "
             "this operating system / configuration");
    return nullptr;
}
1959 :
// No-op stub: declaring a thread is only meaningful for the
// HAVE_VIRTUAL_MEM_VMA implementation, which is disabled here.
void CPLVirtualMemDeclareThread(CPLVirtualMem * /* ctxt */)
{
}
1963 :
// No-op stub: counterpart of CPLVirtualMemDeclareThread() when the
// HAVE_VIRTUAL_MEM_VMA implementation is disabled.
void CPLVirtualMemUnDeclareThread(CPLVirtualMem * /* ctxt */)
{
}
1967 :
// No-op stub: pinning pages is only needed by the fault-driven
// HAVE_VIRTUAL_MEM_VMA implementation, which is disabled here.
void CPLVirtualMemPin(CPLVirtualMem * /* ctxt */, void * /* pAddr */,
                      size_t /* nSize */, int /* bWriteOp */)
{
}
1972 :
// No-op stub: there is no helper thread / SIGSEGV handler to tear down
// when HAVE_VIRTUAL_MEM_VMA is disabled.
void CPLVirtualMemManagerTerminate(void)
{
}
1976 :
1977 : #endif // HAVE_VIRTUAL_MEM_VMA
1978 :
1979 : #ifdef HAVE_MMAP
1980 :
1981 : /************************************************************************/
1982 : /* CPLVirtualMemFreeFileMemoryMapped() */
1983 : /************************************************************************/
1984 :
1985 28 : static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMem *ctxt)
1986 : {
1987 28 : const size_t nMappingSize = ctxt->nSize +
1988 28 : static_cast<GByte *>(ctxt->pData) -
1989 28 : static_cast<GByte *>(ctxt->pDataToFree);
1990 28 : const int nRet = munmap(ctxt->pDataToFree, nMappingSize);
1991 28 : IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
1992 28 : }
1993 :
/************************************************************************/
/*                        CPLVirtualMemFileMapNew()                     */
/************************************************************************/

// Create a read-only or read-write memory mapping over [nOffset,
// nOffset + nLength) of file fp, using mmap() on the file's native
// descriptor.  The mapping start is rounded down to a page boundary and
// ctxt->pData is advanced by the resulting alignment slack so callers see
// exactly the requested offset.  Returns nullptr (with CPLError set for
// most causes) on failure.
CPLVirtualMem *CPLVirtualMemFileMapNew(
    VSILFILE *fp, vsi_l_offset nOffset, vsi_l_offset nLength,
    CPLVirtualMemAccessMode eAccessMode,
    CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
{
#if SIZEOF_VOIDP == 4
    // On 32-bit, reject lengths that do not fit in size_t ...
    if (nLength != static_cast<size_t>(nLength))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nLength = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nLength);
        return nullptr;
    }
    // ... and offsets whose page-rounded value does not fit in off_t,
    // since mmap() takes the offset as an off_t.
    if (nOffset + CPLGetPageSize() !=
        static_cast<vsi_l_offset>(
            static_cast<off_t>(nOffset + CPLGetPageSize())))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nOffset = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nOffset);
        return nullptr;
    }
#endif

    // A null native descriptor means fp is a purely virtual VSI file
    // (e.g. /vsimem/), which cannot be mmap'ed.
    int fd = static_cast<int>(
        reinterpret_cast<GUIntptr_t>(VSIFGetNativeFileDescriptorL(fp)));
    if (fd == 0)
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "Cannot operate on a virtual file");
        return nullptr;
    }

    // mmap() requires a page-aligned file offset: round down and remember
    // the slack so the returned pData can point at the exact byte asked for.
    const off_t nAlignedOffset =
        static_cast<off_t>((nOffset / CPLGetPageSize()) * CPLGetPageSize());
    size_t nAlignment = static_cast<size_t>(nOffset - nAlignedOffset);
    size_t nMappingSize = static_cast<size_t>(nLength + nAlignment);

    // Need to ensure that the requested extent fits into the file size
    // otherwise SIGBUS errors will occur when using the mapping.
    vsi_l_offset nCurPos = VSIFTellL(fp);
    if (VSIFSeekL(fp, 0, SEEK_END) != 0)
        return nullptr;
    vsi_l_offset nFileSize = VSIFTellL(fp);
    if (nFileSize < nOffset + nLength)
    {
        if (eAccessMode != VIRTUALMEM_READWRITE)
        {
            // Cannot grow a read-only file: refuse the mapping and restore
            // the caller's file position before bailing out.
            CPLError(CE_Failure, CPLE_AppDefined,
                     "Trying to map an extent outside of the file");
            CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
            return nullptr;
        }
        else
        {
            // Read-write mapping: extend the file by writing a single byte
            // at the last requested position.
            char ch = 0;
            if (VSIFSeekL(fp, nOffset + nLength - 1, SEEK_SET) != 0 ||
                VSIFWriteL(&ch, 1, 1, fp) != 1)
            {
                CPLError(CE_Failure, CPLE_AppDefined,
                         "Cannot extend file to mapping size");
                CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
                return nullptr;
            }
        }
    }
    // Restore the file position the caller left fp at.
    if (VSIFSeekL(fp, nCurPos, SEEK_SET) != 0)
        return nullptr;

    CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
    if (ctxt == nullptr)
        return nullptr;

    void *addr =
        mmap(nullptr, nMappingSize,
             eAccessMode == VIRTUALMEM_READWRITE ? PROT_READ | PROT_WRITE
                                                 : PROT_READ,
             MAP_SHARED, fd, nAlignedOffset);
    if (addr == MAP_FAILED)
    {
        int myerrno = errno;
        CPLError(CE_Failure, CPLE_AppDefined, "mmap() failed : %s",
                 strerror(myerrno));
        VSIFree(ctxt);
        // cppcheck thinks we are leaking addr.
        // cppcheck-suppress memleak
        return nullptr;
    }

    // pData points at the requested offset; pDataToFree keeps the real
    // (page-aligned) mapping start needed later by munmap().
    ctxt->eType = VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
    ctxt->nRefCount = 1;
    ctxt->eAccessMode = eAccessMode;
    ctxt->pData = static_cast<GByte *>(addr) + nAlignment;
    ctxt->pDataToFree = addr;
    ctxt->nSize = static_cast<size_t>(nLength);
    ctxt->nPageSize = CPLGetPageSize();
    ctxt->bSingleThreadUsage = false;
    ctxt->pfnFreeUserData = pfnFreeUserData;
    ctxt->pCbkUserData = pCbkUserData;

    return ctxt;
}
2103 :
2104 : #else // HAVE_MMAP
2105 :
// Fallback stub when the platform lacks mmap(): file mapping is not
// possible, so creation always fails with CPLE_NotSupported.
CPLVirtualMem *CPLVirtualMemFileMapNew(
    VSILFILE * /* fp */, vsi_l_offset /* nOffset */, vsi_l_offset /* nLength */,
    CPLVirtualMemAccessMode /* eAccessMode */,
    CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
{
    CPLError(CE_Failure, CPLE_NotSupported,
             "CPLVirtualMemFileMapNew() unsupported on this "
             "operating system / configuration");
    return nullptr;
}
2116 :
2117 : #endif // HAVE_MMAP
2118 :
/************************************************************************/
/*                           CPLGetPageSize()                           */
/************************************************************************/

// Return the system memory page size in bytes, or 0 when no virtual
// memory support is compiled in (or sysconf() fails).
size_t CPLGetPageSize(void)
{
#if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
    // sysconf() can return -1 on error: clamp negative values to zero
    // before converting to the unsigned return type.
    const long nPageSize = sysconf(_SC_PAGESIZE);
    return nPageSize > 0 ? static_cast<size_t>(nPageSize) : 0;
#else
    return 0;
#endif
}
2131 :
/************************************************************************/
/*                   CPLIsVirtualMemFileMapAvailable()                  */
/************************************************************************/

// Return TRUE when CPLVirtualMemFileMapNew() is functional on this build
// (i.e. mmap() is available), FALSE otherwise.
int CPLIsVirtualMemFileMapAvailable(void)
{
#ifdef HAVE_MMAP
    return TRUE;
#else
    return FALSE;
#endif
}
2144 :
/************************************************************************/
/*                          CPLVirtualMemFree()                         */
/************************************************************************/

// Drop one reference on ctxt and destroy it when the count reaches zero.
// Derived mappings (created by CPLVirtualMemDerivedNew) own no memory of
// their own: they release their reference on the base mapping instead of
// unmapping anything.  Safe to call with nullptr.
void CPLVirtualMemFree(CPLVirtualMem *ctxt)
{
    if (ctxt == nullptr || --(ctxt->nRefCount) > 0)
        return;

    // Derived view: recurse into the base mapping, then free only the
    // wrapper context and its user data.
    if (ctxt->pVMemBase != nullptr)
    {
        CPLVirtualMemFree(ctxt->pVMemBase);
        if (ctxt->pfnFreeUserData != nullptr)
            ctxt->pfnFreeUserData(ctxt->pCbkUserData);
        CPLFree(ctxt);
        return;
    }

    // Base mapping: release the underlying pages according to its type.
#ifdef HAVE_MMAP
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
#endif
#ifdef HAVE_VIRTUAL_MEM_VMA
    if (ctxt->eType == VIRTUAL_MEM_TYPE_VMA)
        CPLVirtualMemFreeFileMemoryMapped(
            reinterpret_cast<CPLVirtualMemVMA *>(ctxt));
#endif

    if (ctxt->pfnFreeUserData != nullptr)
        ctxt->pfnFreeUserData(ctxt->pCbkUserData);
    CPLFree(ctxt);
}
2177 :
2178 : /************************************************************************/
2179 : /* CPLVirtualMemGetAddr() */
2180 : /************************************************************************/
2181 :
2182 302 : void *CPLVirtualMemGetAddr(CPLVirtualMem *ctxt)
2183 : {
2184 302 : return ctxt->pData;
2185 : }
2186 :
2187 : /************************************************************************/
2188 : /* CPLVirtualMemIsFileMapping() */
2189 : /************************************************************************/
2190 :
2191 4 : int CPLVirtualMemIsFileMapping(CPLVirtualMem *ctxt)
2192 : {
2193 4 : return ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
2194 : }
2195 :
2196 : /************************************************************************/
2197 : /* CPLVirtualMemGetAccessMode() */
2198 : /************************************************************************/
2199 :
2200 0 : CPLVirtualMemAccessMode CPLVirtualMemGetAccessMode(CPLVirtualMem *ctxt)
2201 : {
2202 0 : return ctxt->eAccessMode;
2203 : }
2204 :
2205 : /************************************************************************/
2206 : /* CPLVirtualMemGetPageSize() */
2207 : /************************************************************************/
2208 :
2209 5 : size_t CPLVirtualMemGetPageSize(CPLVirtualMem *ctxt)
2210 : {
2211 5 : return ctxt->nPageSize;
2212 : }
2213 :
2214 : /************************************************************************/
2215 : /* CPLVirtualMemGetSize() */
2216 : /************************************************************************/
2217 :
2218 271 : size_t CPLVirtualMemGetSize(CPLVirtualMem *ctxt)
2219 : {
2220 271 : return ctxt->nSize;
2221 : }
2222 :
2223 : /************************************************************************/
2224 : /* CPLVirtualMemIsAccessThreadSafe() */
2225 : /************************************************************************/
2226 :
2227 1 : int CPLVirtualMemIsAccessThreadSafe(CPLVirtualMem *ctxt)
2228 : {
2229 1 : return !ctxt->bSingleThreadUsage;
2230 : }
2231 :
2232 : /************************************************************************/
2233 : /* CPLVirtualMemDerivedNew() */
2234 : /************************************************************************/
2235 :
2236 8 : CPLVirtualMem *CPLVirtualMemDerivedNew(
2237 : CPLVirtualMem *pVMemBase, vsi_l_offset nOffset, vsi_l_offset nSize,
2238 : CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
2239 : {
2240 8 : if (nOffset + nSize > pVMemBase->nSize)
2241 0 : return nullptr;
2242 :
2243 : CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
2244 8 : VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
2245 8 : if (ctxt == nullptr)
2246 0 : return nullptr;
2247 :
2248 8 : ctxt->eType = pVMemBase->eType;
2249 8 : ctxt->nRefCount = 1;
2250 8 : ctxt->pVMemBase = pVMemBase;
2251 8 : pVMemBase->nRefCount++;
2252 8 : ctxt->eAccessMode = pVMemBase->eAccessMode;
2253 8 : ctxt->pData = static_cast<GByte *>(pVMemBase->pData) + nOffset;
2254 8 : ctxt->pDataToFree = nullptr;
2255 8 : ctxt->nSize = static_cast<size_t>(nSize);
2256 8 : ctxt->nPageSize = pVMemBase->nPageSize;
2257 8 : ctxt->bSingleThreadUsage = CPL_TO_BOOL(pVMemBase->bSingleThreadUsage);
2258 8 : ctxt->pfnFreeUserData = pfnFreeUserData;
2259 8 : ctxt->pCbkUserData = pCbkUserData;
2260 :
2261 8 : return ctxt;
2262 : }
|