w32tex
About: TeX Live provides a comprehensive TeX system, including all the major TeX-related programs, macro packages, and fonts that are free software. These are the Windows sources.
  Fossies Dox: w32tex-src.tar.xz (unofficial and experimental doxygen-generated source code documentation)

lj_alloc.c
1 /*
2 ** Bundled memory allocator.
3 **
4 ** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
5 ** The original bears the following remark:
6 **
7 ** This is a version (aka dlmalloc) of malloc/free/realloc written by
8 ** Doug Lea and released to the public domain, as explained at
9 ** http://creativecommons.org/licenses/publicdomain.
10 **
11 ** * Version pre-2.8.4 Wed Mar 29 19:46:29 2006 (dl at gee)
12 **
13 ** No additional copyright is claimed over the customizations.
14 ** Please do NOT bother the original author about this version here!
15 **
16 ** If you want to use dlmalloc in another project, you should get
17 ** the original from: ftp://gee.cs.oswego.edu/pub/misc/
18 ** For thread-safe derivatives, take a look at:
19 ** - ptmalloc: http://www.malloc.de/
20 ** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
21 */
22 
23 #define lj_alloc_c
24 #define LUA_CORE
25 
26 /* To get the mremap prototype. Must be defined before any system includes. */
27 #if defined(__linux__) && !defined(_GNU_SOURCE)
28 #define _GNU_SOURCE
29 #endif
30 
31 #include "lj_def.h"
32 #include "lj_arch.h"
33 #include "lj_alloc.h"
34 
35 #ifndef LUAJIT_USE_SYSMALLOC
36 
37 #define MAX_SIZE_T (~(size_t)0)
38 #define MALLOC_ALIGNMENT ((size_t)8U)
39 
40 #define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U)
41 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
42 #define DEFAULT_MMAP_THRESHOLD ((size_t)128U * (size_t)1024U)
43 #define MAX_RELEASE_CHECK_RATE 255
44 
45 /* ------------------- size_t and alignment properties -------------------- */
46 
47 /* The byte and bit size of a size_t */
48 #define SIZE_T_SIZE (sizeof(size_t))
49 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
50 
51 /* Some constants coerced to size_t */
52 /* Annoying but necessary to avoid errors on some platforms */
53 #define SIZE_T_ZERO ((size_t)0)
54 #define SIZE_T_ONE ((size_t)1)
55 #define SIZE_T_TWO ((size_t)2)
56 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
57 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
58 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
59 
60 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
61 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
62 
63 /* the number of bytes to offset an address to align it */
64 #define align_offset(A)\
65  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
66  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
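/* Illustrative note (not part of the original file): with MALLOC_ALIGNMENT
** fixed at 8, CHUNK_ALIGN_MASK is 7. For an address A = 0x1004,
** (A & 7) == 4, so align_offset(A) == (8 - 4) & 7 == 4 and A is bumped
** to the aligned address 0x1008. An already aligned A yields offset 0.
*/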
67 
68 /* -------------------------- MMAP support ------------------------------- */
69 
70 #define MFAIL ((void *)(MAX_SIZE_T))
71 #define CMFAIL ((char *)(MFAIL)) /* defined for convenience */
72 
73 #define IS_DIRECT_BIT (SIZE_T_ONE)
74 
75 
76 /* Determine system-specific block allocation method. */
77 #if LJ_TARGET_WINDOWS
78 
79 #define WIN32_LEAN_AND_MEAN
80 #include <windows.h>
81 
82 #define LJ_ALLOC_VIRTUALALLOC 1
83 
84 #if LJ_64 && !LJ_GC64
85 #define LJ_ALLOC_NTAVM 1
86 #endif
87 
88 #else
89 
90 #include <errno.h>
91 /* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
92 #include <sys/mman.h>
93 
94 #define LJ_ALLOC_MMAP 1
95 
96 #if LJ_64
97 
98 #define LJ_ALLOC_MMAP_PROBE 1
99 
100 #if LJ_GC64
101 #define LJ_ALLOC_MBITS 47 /* 128 TB in LJ_GC64 mode. */
102 #elif LJ_TARGET_X64 && LJ_HASJIT
103 /* Due to limitations in the x64 compiler backend. */
104 #define LJ_ALLOC_MBITS 31 /* 2 GB on x64 with !LJ_GC64. */
105 #else
106 #define LJ_ALLOC_MBITS 32 /* 4 GB on other archs with !LJ_GC64. */
107 #endif
108 
109 #endif
110 
111 #if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
112 #define LJ_ALLOC_MMAP32 1
113 #endif
114 
115 #if LJ_TARGET_LINUX
116 #define LJ_ALLOC_MREMAP 1
117 #endif
118 
119 #endif
120 
121 
122 #if LJ_ALLOC_VIRTUALALLOC
123 
124 #if LJ_ALLOC_NTAVM
125 /* Undocumented, but hey, that's what we all love so much about Windows. */
126 typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
127  size_t *size, ULONG alloctype, ULONG prot);
128 static PNTAVM ntavm;
129 
130 /* Number of top bits of the lower 32 bits of an address that must be zero.
131 ** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
132 */
133 #define NTAVM_ZEROBITS 1
134 
135 static void init_mmap(void)
136 {
137  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
138  "NtAllocateVirtualMemory");
139 }
140 #define INIT_MMAP() init_mmap()
141 
142 /* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
143 static void *CALL_MMAP(size_t size)
144 {
145  DWORD olderr = GetLastError();
146  void *ptr = NULL;
147  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
148  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
149  SetLastError(olderr);
150  return st == 0 ? ptr : MFAIL;
151 }
152 
153 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
154 static void *DIRECT_MMAP(size_t size)
155 {
156  DWORD olderr = GetLastError();
157  void *ptr = NULL;
158  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
159  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
160  SetLastError(olderr);
161  return st == 0 ? ptr : MFAIL;
162 }
163 
164 #else
165 
166 /* Win32 MMAP via VirtualAlloc */
167 static void *CALL_MMAP(size_t size)
168 {
169  DWORD olderr = GetLastError();
170  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
171  SetLastError(olderr);
172  return ptr ? ptr : MFAIL;
173 }
174 
175 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
176 static void *DIRECT_MMAP(size_t size)
177 {
178  DWORD olderr = GetLastError();
179  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
180  PAGE_READWRITE);
181  SetLastError(olderr);
182  return ptr ? ptr : MFAIL;
183 }
184 
185 #endif
186 
187 /* This function supports releasing coalesced segments */
188 static int CALL_MUNMAP(void *ptr, size_t size)
189 {
190  DWORD olderr = GetLastError();
191  MEMORY_BASIC_INFORMATION minfo;
192  char *cptr = (char *)ptr;
193  while (size) {
194  if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
195  return -1;
196  if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
197  minfo.State != MEM_COMMIT || minfo.RegionSize > size)
198  return -1;
199  if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
200  return -1;
201  cptr += minfo.RegionSize;
202  size -= minfo.RegionSize;
203  }
204  SetLastError(olderr);
205  return 0;
206 }
207 
208 #elif LJ_ALLOC_MMAP
209 
210 #define MMAP_PROT (PROT_READ|PROT_WRITE)
211 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
212 #define MAP_ANONYMOUS MAP_ANON
213 #endif
214 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
215 
216 #if LJ_ALLOC_MMAP_PROBE
217 
218 #ifdef MAP_TRYFIXED
219 #define MMAP_FLAGS_PROBE (MMAP_FLAGS|MAP_TRYFIXED)
220 #else
221 #define MMAP_FLAGS_PROBE MMAP_FLAGS
222 #endif
223 
224 #define LJ_ALLOC_MMAP_PROBE_MAX 30
225 #define LJ_ALLOC_MMAP_PROBE_LINEAR 5
226 
227 #define LJ_ALLOC_MMAP_PROBE_LOWER ((uintptr_t)0x4000)
228 
229 /* No point in a giant ifdef mess. Just try to open /dev/urandom.
230 ** It doesn't really matter if this fails, since we get some ASLR bits from
231 ** every unsuitable allocation, too. And we prefer linear allocation, anyway.
232 */
233 #include <fcntl.h>
234 #include <unistd.h>
235 
236 static uintptr_t mmap_probe_seed(void)
237 {
238  uintptr_t val;
239  int fd = open("/dev/urandom", O_RDONLY);
240  if (fd != -1) {
241  int ok = ((size_t)read(fd, &val, sizeof(val)) == sizeof(val));
242  (void)close(fd);
243  if (ok) return val;
244  }
245  return 1; /* Punt. */
246 }
247 
248 static void *mmap_probe(size_t size)
249 {
250  /* Hint for next allocation. Doesn't need to be thread-safe. */
251  static uintptr_t hint_addr = 0;
252  static uintptr_t hint_prng = 0;
253  int olderr = errno;
254  int retry;
255  for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
256  void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
257  uintptr_t addr = (uintptr_t)p;
258  if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER) {
259  /* We got a suitable address. Bump the hint address. */
260  hint_addr = addr + size;
261  errno = olderr;
262  return p;
263  }
264  if (p != MFAIL) {
265  munmap(p, size);
266  } else if (errno == ENOMEM) {
267  return MFAIL;
268  }
269  if (hint_addr) {
270  /* First, try linear probing. */
271  if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
272  hint_addr += 0x1000000;
273  if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
274  hint_addr = 0;
275  continue;
276  } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
277  /* Next, try a no-hint probe to get back an ASLR address. */
278  hint_addr = 0;
279  continue;
280  }
281  }
282  /* Finally, try pseudo-random probing. */
283  if (LJ_UNLIKELY(hint_prng == 0)) {
284  hint_prng = mmap_probe_seed();
285  }
286  /* The unsuitable address we got has some ASLR PRNG bits. */
287  hint_addr ^= addr & ~((uintptr_t)(LJ_PAGESIZE-1));
288  do { /* The PRNG itself is very weak, but see above. */
289  hint_prng = hint_prng * 1103515245 + 12345;
290  hint_addr ^= hint_prng * (uintptr_t)LJ_PAGESIZE;
291  hint_addr &= (((uintptr_t)1 << LJ_ALLOC_MBITS)-1);
292  } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
293  }
294  errno = olderr;
295  return MFAIL;
296 }
297 
298 #endif
299 
300 #if LJ_ALLOC_MMAP32
301 
302 #if defined(__sun__)
303 #define LJ_ALLOC_MMAP32_START ((uintptr_t)0x1000)
304 #else
305 #define LJ_ALLOC_MMAP32_START ((uintptr_t)0)
306 #endif
307 
308 static void *mmap_map32(size_t size)
309 {
310 #if LJ_ALLOC_MMAP_PROBE
311  static int fallback = 0;
312  if (fallback)
313  return mmap_probe(size);
314 #endif
315  {
316  int olderr = errno;
317  void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
318  errno = olderr;
319  /* This only allows 1GB on Linux. So fallback to probing to get 2GB. */
320 #if LJ_ALLOC_MMAP_PROBE
321  if (ptr == MFAIL) {
322  fallback = 1;
323  return mmap_probe(size);
324  }
325 #endif
326  return ptr;
327  }
328 }
329 
330 #endif
331 
332 #if LJ_ALLOC_MMAP32
333 #define CALL_MMAP(size) mmap_map32(size)
334 #elif LJ_ALLOC_MMAP_PROBE
335 #define CALL_MMAP(size) mmap_probe(size)
336 #else
337 static void *CALL_MMAP(size_t size)
338 {
339  int olderr = errno;
340  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
341  errno = olderr;
342  return ptr;
343 }
344 #endif
345 
346 #if ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
347 
348 #include <sys/resource.h>
349 
350 static void init_mmap(void)
351 {
352  struct rlimit rlim;
353  rlim.rlim_cur = rlim.rlim_max = 0x10000;
354  setrlimit(RLIMIT_DATA, &rlim); /* Ignore result. May fail later. */
355 }
356 #define INIT_MMAP() init_mmap()
357 
358 #endif
359 
360 static int CALL_MUNMAP(void *ptr, size_t size)
361 {
362  int olderr = errno;
363  int ret = munmap(ptr, size);
364  errno = olderr;
365  return ret;
366 }
367 
368 #if LJ_ALLOC_MREMAP
369 /* Need to define _GNU_SOURCE to get the mremap prototype. */
370 static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
371 {
372  int olderr = errno;
373  ptr = mremap(ptr, osz, nsz, flags);
374  errno = olderr;
375  return ptr;
376 }
377 
378 #define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
379 #define CALL_MREMAP_NOMOVE 0
380 #define CALL_MREMAP_MAYMOVE 1
381 #if LJ_64 && !LJ_GC64
382 #define CALL_MREMAP_MV CALL_MREMAP_NOMOVE
383 #else
384 #define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE
385 #endif
386 #endif
387 
388 #endif
389 
390 
391 #ifndef INIT_MMAP
392 #define INIT_MMAP() ((void)0)
393 #endif
394 
395 #ifndef DIRECT_MMAP
396 #define DIRECT_MMAP(s) CALL_MMAP(s)
397 #endif
398 
399 #ifndef CALL_MREMAP
400 #define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
401 #endif
402 
403 /* ----------------------- Chunk representations ------------------------ */
404 
405 struct malloc_chunk {
406  size_t prev_foot; /* Size of previous chunk (if free). */
407  size_t head; /* Size and inuse bits. */
408  struct malloc_chunk *fd; /* double links -- used only if free. */
409  struct malloc_chunk *bk;
410 };
411 
412 typedef struct malloc_chunk mchunk;
413 typedef struct malloc_chunk *mchunkptr;
414 typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
415 typedef size_t bindex_t; /* Described below */
416 typedef unsigned int binmap_t; /* Described below */
417 typedef unsigned int flag_t; /* The type of various bit flag sets */
418 
419 /* ------------------- Chunks sizes and alignments ----------------------- */
420 
421 #define MCHUNK_SIZE (sizeof(mchunk))
422 
423 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
424 
425 /* Direct chunks need a second word of overhead ... */
426 #define DIRECT_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
427 /* ... and additional padding for fake next-chunk at foot */
428 #define DIRECT_FOOT_PAD (FOUR_SIZE_T_SIZES)
429 
430 /* The smallest size we can malloc is an aligned minimal chunk */
431 #define MIN_CHUNK_SIZE\
432  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
433 
434 /* conversion from malloc headers to user pointers, and back */
435 #define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
436 #define mem2chunk(mem) ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
437 /* chunk associated with aligned address A */
438 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
439 
440 /* Bounds on request (not chunk) sizes. */
441 #define MAX_REQUEST ((~MIN_CHUNK_SIZE+1) << 2)
442 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
443 
444 /* pad request bytes into a usable size */
445 #define pad_request(req) \
446  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
447 
448 /* pad request, checking for minimum (but not maximum) */
449 #define request2size(req) \
450  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
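/* Illustrative note (not part of the original file), assuming an 8-byte
** size_t: CHUNK_OVERHEAD == 8 and CHUNK_ALIGN_MASK == 7, so
** pad_request(100) == (100 + 8 + 7) & ~7 == 112, i.e. a 100-byte request
** occupies a 112-byte chunk. Requests below MIN_REQUEST (23 bytes here)
** are rounded up to MIN_CHUNK_SIZE (32 bytes).
*/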
451 
452 /* ------------------ Operations on head and foot fields ----------------- */
453 
454 #define PINUSE_BIT (SIZE_T_ONE)
455 #define CINUSE_BIT (SIZE_T_TWO)
456 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
457 
458 /* Head value for fenceposts */
459 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
460 
461 /* extraction of fields from head words */
462 #define cinuse(p) ((p)->head & CINUSE_BIT)
463 #define pinuse(p) ((p)->head & PINUSE_BIT)
464 #define chunksize(p) ((p)->head & ~(INUSE_BITS))
465 
466 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
467 #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
468 
469 /* Treat space at ptr +/- offset as a chunk */
470 #define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s)))
471 #define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s)))
472 
473 /* Ptr to next or previous physical malloc_chunk. */
474 #define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
475 #define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
476 
477 /* extract next chunk's pinuse bit */
478 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
479 
480 /* Get/set size at footer */
481 #define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot)
482 #define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
483 
484 /* Set size, pinuse bit, and foot */
485 #define set_size_and_pinuse_of_free_chunk(p, s)\
486  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
487 
488 /* Set size, pinuse bit, foot, and clear next pinuse */
489 #define set_free_with_pinuse(p, s, n)\
490  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
491 
492 #define is_direct(p)\
493  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
494 
495 /* Get the internal overhead associated with chunk p */
496 #define overhead_for(p)\
497  (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
498 
499 /* ---------------------- Overlaid data structures ----------------------- */
500 
501 struct malloc_tree_chunk {
502  /* The first four fields must be compatible with malloc_chunk */
503  size_t prev_foot;
504  size_t head;
505  struct malloc_tree_chunk *fd;
506  struct malloc_tree_chunk *bk;
507 
508  struct malloc_tree_chunk *child[2];
509  struct malloc_tree_chunk *parent;
510  bindex_t index;
511 };
512 
513 typedef struct malloc_tree_chunk tchunk;
514 typedef struct malloc_tree_chunk *tchunkptr;
515 typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
516 
517 /* A little helper macro for trees */
518 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
519 
520 /* ----------------------------- Segments -------------------------------- */
521 
522 struct malloc_segment {
523  char *base; /* base address */
524  size_t size; /* allocated size */
525  struct malloc_segment *next; /* ptr to next segment */
526 };
527 
528 typedef struct malloc_segment msegment;
529 typedef struct malloc_segment *msegmentptr;
530 
531 /* ---------------------------- malloc_state ----------------------------- */
532 
533 /* Bin types, widths and sizes */
534 #define NSMALLBINS (32U)
535 #define NTREEBINS (32U)
536 #define SMALLBIN_SHIFT (3U)
537 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
538 #define TREEBIN_SHIFT (8U)
539 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
540 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
541 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
542 
543 struct malloc_state {
544  binmap_t smallmap;
545  binmap_t treemap;
546  size_t dvsize;
547  size_t topsize;
548  mchunkptr dv;
549  mchunkptr top;
550  size_t trim_check;
551  size_t release_checks;
552  mchunkptr smallbins[(NSMALLBINS+1)*2];
553  tbinptr treebins[NTREEBINS];
554  msegment seg;
555 };
556 
557 typedef struct malloc_state *mstate;
558 
559 #define is_initialized(M) ((M)->top != 0)
560 
561 /* -------------------------- system alloc setup ------------------------- */
562 
563 /* page-align a size */
564 #define page_align(S)\
565  (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
566 
567 /* granularity-align a size */
568 #define granularity_align(S)\
569  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
570  & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
571 
572 #if LJ_TARGET_WINDOWS
573 #define mmap_align(S) granularity_align(S)
574 #else
575 #define mmap_align(S) page_align(S)
576 #endif
577 
578 /* True if segment S holds address A */
579 #define segment_holds(S, A)\
580  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
581 
582 /* Return segment holding given address */
583 static msegmentptr segment_holding(mstate m, char *addr)
584 {
585  msegmentptr sp = &m->seg;
586  for (;;) {
587  if (addr >= sp->base && addr < sp->base + sp->size)
588  return sp;
589  if ((sp = sp->next) == 0)
590  return 0;
591  }
592 }
593 
594 /* Return true if segment contains a segment link */
595 static int has_segment_link(mstate m, msegmentptr ss)
596 {
597  msegmentptr sp = &m->seg;
598  for (;;) {
599  if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
600  return 1;
601  if ((sp = sp->next) == 0)
602  return 0;
603  }
604 }
605 
606 /*
607  TOP_FOOT_SIZE is padding at the end of a segment, including space
608  that may be needed to place segment records and fenceposts when new
609  noncontiguous segments are added.
610 */
611 #define TOP_FOOT_SIZE\
612  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
613 
614 /* ---------------------------- Indexing Bins ---------------------------- */
615 
616 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
617 #define small_index(s) ((s) >> SMALLBIN_SHIFT)
618 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
619 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
620 
621 /* addressing by index. See above about smallbin repositioning */
622 #define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
623 #define treebin_at(M,i) (&((M)->treebins[i]))
624 
625 /* assign tree index for size S to variable I */
626 #define compute_tree_index(S, I)\
627 {\
628  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
629  if (X == 0) {\
630  I = 0;\
631  } else if (X > 0xFFFF) {\
632  I = NTREEBINS-1;\
633  } else {\
634  unsigned int K = lj_fls(X);\
635  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
636  }\
637 }
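/* Illustrative note (not part of the original file): for S == 24576
** (0x6000), X == S >> TREEBIN_SHIFT == 96, K == lj_fls(96) == 6 and
** I == (6 << 1) + ((S >> (6 + 7)) & 1) == 12 + 1 == 13, so treebin 13
** holds chunks in the range [24576, 32768).
*/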
638 
639 /* Bit representing maximum resolved size in a treebin at i */
640 #define bit_for_tree_index(i) \
641  (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
642 
643 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
644 #define leftshift_for_tree_index(i) \
645  ((i == NTREEBINS-1)? 0 : \
646  ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
647 
648 /* The size of the smallest chunk held in bin with index i */
649 #define minsize_for_tree_index(i) \
650  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
651  (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
652 
653 /* ------------------------ Operations on bin maps ----------------------- */
654 
655 /* bit corresponding to given index */
656 #define idx2bit(i) ((binmap_t)(1) << (i))
657 
658 /* Mark/Clear bits with given index */
659 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
660 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
661 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
662 
663 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
664 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
665 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
666 
667 /* mask with all bits to left of least bit of x on */
668 #define left_bits(x) ((x<<1) | (~(x<<1)+1))
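/* Illustrative note (not part of the original file): for index i == 4,
** idx2bit(4) == 0x10, so x<<1 == 0x20 and ~(x<<1)+1 == 0xFFFFFFE0, giving
** left_bits(0x10) == 0xFFFFFFE0. ANDed with smallmap or treemap, this
** selects every nonempty bin with an index greater than 4.
*/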
669 
670 /* Set cinuse bit and pinuse bit of next chunk */
671 #define set_inuse(M,p,s)\
672  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
673  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
674 
675 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
676 #define set_inuse_and_pinuse(M,p,s)\
677  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
678  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
679 
680 /* Set size, cinuse and pinuse bit of this chunk */
681 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
682  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
683 
684 /* ----------------------- Operations on smallbins ----------------------- */
685 
686 /* Link a free chunk into a smallbin */
687 #define insert_small_chunk(M, P, S) {\
688  bindex_t I = small_index(S);\
689  mchunkptr B = smallbin_at(M, I);\
690  mchunkptr F = B;\
691  if (!smallmap_is_marked(M, I))\
692  mark_smallmap(M, I);\
693  else\
694  F = B->fd;\
695  B->fd = P;\
696  F->bk = P;\
697  P->fd = F;\
698  P->bk = B;\
699 }
700 
701 /* Unlink a chunk from a smallbin */
702 #define unlink_small_chunk(M, P, S) {\
703  mchunkptr F = P->fd;\
704  mchunkptr B = P->bk;\
705  bindex_t I = small_index(S);\
706  if (F == B) {\
707  clear_smallmap(M, I);\
708  } else {\
709  F->bk = B;\
710  B->fd = F;\
711  }\
712 }
713 
714 /* Unlink the first chunk from a smallbin */
715 #define unlink_first_small_chunk(M, B, P, I) {\
716  mchunkptr F = P->fd;\
717  if (B == F) {\
718  clear_smallmap(M, I);\
719  } else {\
720  B->fd = F;\
721  F->bk = B;\
722  }\
723 }
724 
725 /* Replace dv node, binning the old one */
726 /* Used only when dvsize known to be small */
727 #define replace_dv(M, P, S) {\
728  size_t DVS = M->dvsize;\
729  if (DVS != 0) {\
730  mchunkptr DV = M->dv;\
731  insert_small_chunk(M, DV, DVS);\
732  }\
733  M->dvsize = S;\
734  M->dv = P;\
735 }
736 
737 /* ------------------------- Operations on trees ------------------------- */
738 
739 /* Insert chunk into tree */
740 #define insert_large_chunk(M, X, S) {\
741  tbinptr *H;\
742  bindex_t I;\
743  compute_tree_index(S, I);\
744  H = treebin_at(M, I);\
745  X->index = I;\
746  X->child[0] = X->child[1] = 0;\
747  if (!treemap_is_marked(M, I)) {\
748  mark_treemap(M, I);\
749  *H = X;\
750  X->parent = (tchunkptr)H;\
751  X->fd = X->bk = X;\
752  } else {\
753  tchunkptr T = *H;\
754  size_t K = S << leftshift_for_tree_index(I);\
755  for (;;) {\
756  if (chunksize(T) != S) {\
757  tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
758  K <<= 1;\
759  if (*C != 0) {\
760  T = *C;\
761  } else {\
762  *C = X;\
763  X->parent = T;\
764  X->fd = X->bk = X;\
765  break;\
766  }\
767  } else {\
768  tchunkptr F = T->fd;\
769  T->fd = F->bk = X;\
770  X->fd = F;\
771  X->bk = T;\
772  X->parent = 0;\
773  break;\
774  }\
775  }\
776  }\
777 }
778 
779 #define unlink_large_chunk(M, X) {\
780  tchunkptr XP = X->parent;\
781  tchunkptr R;\
782  if (X->bk != X) {\
783  tchunkptr F = X->fd;\
784  R = X->bk;\
785  F->bk = R;\
786  R->fd = F;\
787  } else {\
788  tchunkptr *RP;\
789  if (((R = *(RP = &(X->child[1]))) != 0) ||\
790  ((R = *(RP = &(X->child[0]))) != 0)) {\
791  tchunkptr *CP;\
792  while ((*(CP = &(R->child[1])) != 0) ||\
793  (*(CP = &(R->child[0])) != 0)) {\
794  R = *(RP = CP);\
795  }\
796  *RP = 0;\
797  }\
798  }\
799  if (XP != 0) {\
800  tbinptr *H = treebin_at(M, X->index);\
801  if (X == *H) {\
802  if ((*H = R) == 0) \
803  clear_treemap(M, X->index);\
804  } else {\
805  if (XP->child[0] == X) \
806  XP->child[0] = R;\
807  else \
808  XP->child[1] = R;\
809  }\
810  if (R != 0) {\
811  tchunkptr C0, C1;\
812  R->parent = XP;\
813  if ((C0 = X->child[0]) != 0) {\
814  R->child[0] = C0;\
815  C0->parent = R;\
816  }\
817  if ((C1 = X->child[1]) != 0) {\
818  R->child[1] = C1;\
819  C1->parent = R;\
820  }\
821  }\
822  }\
823 }
824 
825 /* Relays to large vs small bin operations */
826 
827 #define insert_chunk(M, P, S)\
828  if (is_small(S)) { insert_small_chunk(M, P, S)\
829  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
830 
831 #define unlink_chunk(M, P, S)\
832  if (is_small(S)) { unlink_small_chunk(M, P, S)\
833  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
834 
835 /* ----------------------- Direct-mmapping chunks ----------------------- */
836 
837 static void *direct_alloc(size_t nb)
838 {
839  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
840  if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */
841  char *mm = (char *)(DIRECT_MMAP(mmsize));
842  if (mm != CMFAIL) {
843  size_t offset = align_offset(chunk2mem(mm));
844  size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
845  mchunkptr p = (mchunkptr)(mm + offset);
846  p->prev_foot = offset | IS_DIRECT_BIT;
847  p->head = psize|CINUSE_BIT;
848  chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
849  chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
850  return chunk2mem(p);
851  }
852  }
853  return NULL;
854 }
855 
856 static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
857 {
858  size_t oldsize = chunksize(oldp);
859  if (is_small(nb)) /* Can't shrink direct regions below small size */
860  return NULL;
861  /* Keep old chunk if big enough but not too big */
862  if (oldsize >= nb + SIZE_T_SIZE &&
863  (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
864  return oldp;
865  } else {
866  size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
867  size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
868  size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
869  char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
870  oldmmsize, newmmsize, CALL_MREMAP_MV);
871  if (cp != CMFAIL) {
872  mchunkptr newp = (mchunkptr)(cp + offset);
873  size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
874  newp->head = psize|CINUSE_BIT;
875  chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
876  chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
877  return newp;
878  }
879  }
880  return NULL;
881 }
882 
883 /* -------------------------- mspace management -------------------------- */
884 
885 /* Initialize top chunk and its size */
886 static void init_top(mstate m, mchunkptr p, size_t psize)
887 {
888  /* Ensure alignment */
889  size_t offset = align_offset(chunk2mem(p));
890  p = (mchunkptr)((char *)p + offset);
891  psize -= offset;
892 
893  m->top = p;
894  m->topsize = psize;
895  p->head = psize | PINUSE_BIT;
896  /* set size of fake trailing chunk holding overhead space only once */
897  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
898  m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
899 }
900 
901 /* Initialize bins for a new mstate that is otherwise zeroed out */
902 static void init_bins(mstate m)
903 {
904  /* Establish circular links for smallbins */
905  bindex_t i;
906  for (i = 0; i < NSMALLBINS; i++) {
907  sbinptr bin = smallbin_at(m,i);
908  bin->fd = bin->bk = bin;
909  }
910 }
911 
912 /* Allocate chunk and prepend remainder with chunk in successor base. */
913 static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
914 {
915  mchunkptr p = align_as_chunk(newbase);
916  mchunkptr oldfirst = align_as_chunk(oldbase);
917  size_t psize = (size_t)((char *)oldfirst - (char *)p);
918  mchunkptr q = chunk_plus_offset(p, nb);
919  size_t qsize = psize - nb;
920  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
921 
922  /* consolidate remainder with first chunk of old base */
923  if (oldfirst == m->top) {
924  size_t tsize = m->topsize += qsize;
925  m->top = q;
926  q->head = tsize | PINUSE_BIT;
927  } else if (oldfirst == m->dv) {
928  size_t dsize = m->dvsize += qsize;
929  m->dv = q;
930  set_size_and_pinuse_of_free_chunk(q, dsize);
931  } else {
932  if (!cinuse(oldfirst)) {
933  size_t nsize = chunksize(oldfirst);
934  unlink_chunk(m, oldfirst, nsize);
935  oldfirst = chunk_plus_offset(oldfirst, nsize);
936  qsize += nsize;
937  }
938  set_free_with_pinuse(q, qsize, oldfirst);
939  insert_chunk(m, q, qsize);
940  }
941 
942  return chunk2mem(p);
943 }
944 
945 /* Add a segment to hold a new noncontiguous region */
946 static void add_segment(mstate m, char *tbase, size_t tsize)
947 {
948  /* Determine locations and sizes of segment, fenceposts, old top */
949  char *old_top = (char *)m->top;
950  msegmentptr oldsp = segment_holding(m, old_top);
951  char *old_end = oldsp->base + oldsp->size;
952  size_t ssize = pad_request(sizeof(struct malloc_segment));
953  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
954  size_t offset = align_offset(chunk2mem(rawsp));
955  char *asp = rawsp + offset;
956  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
957  mchunkptr sp = (mchunkptr)csp;
958  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
959  mchunkptr tnext = chunk_plus_offset(sp, ssize);
960  mchunkptr p = tnext;
961 
962  /* reset top to new space */
963  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
964 
965  /* Set up segment record */
966  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
967  *ss = m->seg; /* Push current record */
968  m->seg.base = tbase;
969  m->seg.size = tsize;
970  m->seg.next = ss;
971 
972  /* Insert trailing fenceposts */
973  for (;;) {
974  mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
975  p->head = FENCEPOST_HEAD;
976  if ((char *)(&(nextp->head)) < old_end)
977  p = nextp;
978  else
979  break;
980  }
981 
982  /* Insert the rest of old top into a bin as an ordinary free chunk */
983  if (csp != old_top) {
984  mchunkptr q = (mchunkptr)old_top;
985  size_t psize = (size_t)(csp - old_top);
986  mchunkptr tn = chunk_plus_offset(q, psize);
987  set_free_with_pinuse(q, psize, tn);
988  insert_chunk(m, q, psize);
989  }
990 }
991 
992 /* -------------------------- System allocation -------------------------- */
993 
994 static void *alloc_sys(mstate m, size_t nb)
995 {
996  char *tbase = CMFAIL;
997  size_t tsize = 0;
998 
999  /* Directly map large chunks */
1000  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
1001  void *mem = direct_alloc(nb);
1002  if (mem != 0)
1003  return mem;
1004  }
1005 
1006  {
1007  size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
1008  size_t rsize = granularity_align(req);
1009  if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
1010  char *mp = (char *)(CALL_MMAP(rsize));
1011  if (mp != CMFAIL) {
1012  tbase = mp;
1013  tsize = rsize;
1014  }
1015  }
1016  }
1017 
1018  if (tbase != CMFAIL) {
1019  msegmentptr sp = &m->seg;
1020  /* Try to merge with an existing segment */
1021  while (sp != 0 && tbase != sp->base + sp->size)
1022  sp = sp->next;
1023  if (sp != 0 && segment_holds(sp, m->top)) { /* append */
1024  sp->size += tsize;
1025  init_top(m, m->top, m->topsize + tsize);
1026  } else {
1027  sp = &m->seg;
1028  while (sp != 0 && sp->base != tbase + tsize)
1029  sp = sp->next;
1030  if (sp != 0) {
1031  char *oldbase = sp->base;
1032  sp->base = tbase;
1033  sp->size += tsize;
1034  return prepend_alloc(m, tbase, oldbase, nb);
1035  } else {
1036  add_segment(m, tbase, tsize);
1037  }
1038  }
1039 
1040  if (nb < m->topsize) { /* Allocate from new or extended top space */
1041  size_t rsize = m->topsize -= nb;
1042  mchunkptr p = m->top;
1043  mchunkptr r = m->top = chunk_plus_offset(p, nb);
1044  r->head = rsize | PINUSE_BIT;
1045  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1046  return chunk2mem(p);
1047  }
1048  }
1049 
1050  return NULL;
1051 }
1052 
1053 /* ----------------------- system deallocation -------------------------- */
1054 
1055 /* Unmap and unlink any mmapped segments that don't contain used chunks */
1056 static size_t release_unused_segments(mstate m)
1057 {
1058  size_t released = 0;
1059  size_t nsegs = 0;
1060  msegmentptr pred = &m->seg;
1061  msegmentptr sp = pred->next;
1062  while (sp != 0) {
1063  char *base = sp->base;
1064  size_t size = sp->size;
1065  msegmentptr next = sp->next;
1066  nsegs++;
1067  {
1068  mchunkptr p = align_as_chunk(base);
1069  size_t psize = chunksize(p);
1070  /* Can unmap if first chunk holds entire segment and not pinned */
1071  if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
1072  tchunkptr tp = (tchunkptr)p;
1073  if (p == m->dv) {
1074  m->dv = 0;
1075  m->dvsize = 0;
1076  } else {
1077  unlink_large_chunk(m, tp);
1078  }
1079  if (CALL_MUNMAP(base, size) == 0) {
1080  released += size;
1081  /* unlink obsoleted record */
1082  sp = pred;
1083  sp->next = next;
1084  } else { /* back out if cannot unmap */
1085  insert_large_chunk(m, tp, psize);
1086  }
1087  }
1088  }
1089  pred = sp;
1090  sp = next;
1091  }
1092  /* Reset check counter */
1093  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
1094  nsegs : MAX_RELEASE_CHECK_RATE;
1095  return released;
1096 }
1097 
1098 static int alloc_trim(mstate m, size_t pad)
1099 {
1100  size_t released = 0;
1101  if (pad < MAX_REQUEST && is_initialized(m)) {
1102  pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
1103 
1104  if (m->topsize > pad) {
1105  /* Shrink top space in granularity-size units, keeping at least one */
1106  size_t unit = DEFAULT_GRANULARITY;
1107  size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
1108  SIZE_T_ONE) * unit;
1109  msegmentptr sp = segment_holding(m, (char *)m->top);
1110 
1111  if (sp->size >= extra &&
1112  !has_segment_link(m, sp)) { /* can't shrink if pinned */
1113  size_t newsize = sp->size - extra;
1114  /* Prefer mremap, fall back to munmap */
1115  if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
1116  (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
1117  released = extra;
1118  }
1119  }
1120 
1121  if (released != 0) {
1122  sp->size -= released;
1123  init_top(m, m->top, m->topsize - released);
1124  }
1125  }
1126 
1127  /* Unmap any unused mmapped segments */
1128  released += release_unused_segments(m);
1129 
1130  /* On failure, disable autotrim to avoid repeated failed future calls */
1131  if (released == 0 && m->topsize > m->trim_check)
1132  m->trim_check = MAX_SIZE_T;
1133  }
1134 
1135  return (released != 0)? 1 : 0;
1136 }
1137 
1138 /* ---------------------------- malloc support --------------------------- */
1139 
1140 /* allocate a large request from the best fitting chunk in a treebin */
1141 static void *tmalloc_large(mstate m, size_t nb)
1142 {
1143  tchunkptr v = 0;
1144  size_t rsize = ~nb+1; /* Unsigned negation */
1145  tchunkptr t;
1146  bindex_t idx;
1147  compute_tree_index(nb, idx);
1148 
1149  if ((t = *treebin_at(m, idx)) != 0) {
1150  /* Traverse tree for this bin looking for node with size == nb */
1151  size_t sizebits = nb << leftshift_for_tree_index(idx);
1152  tchunkptr rst = 0; /* The deepest untaken right subtree */
1153  for (;;) {
1154  tchunkptr rt;
1155  size_t trem = chunksize(t) - nb;
1156  if (trem < rsize) {
1157  v = t;
1158  if ((rsize = trem) == 0)
1159  break;
1160  }
1161  rt = t->child[1];
1162  t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1163  if (rt != 0 && rt != t)
1164  rst = rt;
1165  if (t == 0) {
1166  t = rst; /* set t to least subtree holding sizes > nb */
1167  break;
1168  }
1169  sizebits <<= 1;
1170  }
1171  }
1172 
1173  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
1174  binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
1175  if (leftbits != 0)
1176  t = *treebin_at(m, lj_ffs(leftbits));
1177  }
1178 
1179  while (t != 0) { /* find smallest of tree or subtree */
1180  size_t trem = chunksize(t) - nb;
1181  if (trem < rsize) {
1182  rsize = trem;
1183  v = t;
1184  }
1185  t = leftmost_child(t);
1186  }
1187 
1188  /* If dv is a better fit, return NULL so malloc will use it */
1189  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
1190  mchunkptr r = chunk_plus_offset(v, nb);
1191  unlink_large_chunk(m, v);
1192  if (rsize < MIN_CHUNK_SIZE) {
1193  set_inuse_and_pinuse(m, v, (rsize + nb));
1194  } else {
1195  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1196  set_size_and_pinuse_of_free_chunk(r, rsize);
1197  insert_chunk(m, r, rsize);
1198  }
1199  return chunk2mem(v);
1200  }
1201  return NULL;
1202 }
1203 
1204 /* allocate a small request from the best fitting chunk in a treebin */
1205 static void *tmalloc_small(mstate m, size_t nb)
1206 {
1207  tchunkptr t, v;
1208  mchunkptr r;
1209  size_t rsize;
1210  bindex_t i = lj_ffs(m->treemap);
1211 
1212  v = t = *treebin_at(m, i);
1213  rsize = chunksize(t) - nb;
1214 
1215  while ((t = leftmost_child(t)) != 0) {
1216  size_t trem = chunksize(t) - nb;
1217  if (trem < rsize) {
1218  rsize = trem;
1219  v = t;
1220  }
1221  }
1222 
1223  r = chunk_plus_offset(v, nb);
1224  unlink_large_chunk(m, v);
1225  if (rsize < MIN_CHUNK_SIZE) {
1226  set_inuse_and_pinuse(m, v, (rsize + nb));
1227  } else {
1228  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1229  set_size_and_pinuse_of_free_chunk(r, rsize);
1230  replace_dv(m, r, rsize);
1231  }
1232  return chunk2mem(v);
1233 }
1234 
1235 /* ----------------------------------------------------------------------- */
1236 
1237 void *lj_alloc_create(void)
1238 {
1239  size_t tsize = DEFAULT_GRANULARITY;
1240  char *tbase;
1241  INIT_MMAP();
1242  tbase = (char *)(CALL_MMAP(tsize));
1243  if (tbase != CMFAIL) {
1244  size_t msize = pad_request(sizeof(struct malloc_state));
1245  mchunkptr mn;
1246  mchunkptr msp = align_as_chunk(tbase);
1247  mstate m = (mstate)(chunk2mem(msp));
1248  memset(m, 0, msize);
1249  msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
1250  m->seg.base = tbase;
1251  m->seg.size = tsize;
1252  m->release_checks = MAX_RELEASE_CHECK_RATE;
1253  init_bins(m);
1254  mn = next_chunk(mem2chunk(m));
1255  init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
1256  return m;
1257  }
1258  return NULL;
1259 }
1260 
1261 void lj_alloc_destroy(void *msp)
1262 {
1263  mstate ms = (mstate)msp;
1264  msegmentptr sp = &ms->seg;
1265  while (sp != 0) {
1266  char *base = sp->base;
1267  size_t size = sp->size;
1268  sp = sp->next;
1269  CALL_MUNMAP(base, size);
1270  }
1271 }
1272 
1273 static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
1274 {
1275  mstate ms = (mstate)msp;
1276  void *mem;
1277  size_t nb;
1278  if (nsize <= MAX_SMALL_REQUEST) {
1279  bindex_t idx;
1280  binmap_t smallbits;
1281  nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
1282  idx = small_index(nb);
1283  smallbits = ms->smallmap >> idx;
1284 
1285  if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
1286  mchunkptr b, p;
1287  idx += ~smallbits & 1; /* Uses next bin if idx empty */
1288  b = smallbin_at(ms, idx);
1289  p = b->fd;
1290  unlink_first_small_chunk(ms, b, p, idx);
1291  set_inuse_and_pinuse(ms, p, small_index2size(idx));
1292  mem = chunk2mem(p);
1293  return mem;
1294  } else if (nb > ms->dvsize) {
1295  if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
1296  mchunkptr b, p, r;
1297  size_t rsize;
1298  binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
1299  bindex_t i = lj_ffs(leftbits);
1300  b = smallbin_at(ms, i);
1301  p = b->fd;
1302  unlink_first_small_chunk(ms, b, p, i);
1303  rsize = small_index2size(i) - nb;
1304  /* Fit here cannot be remainderless if 4byte sizes */
1305  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
1306  set_inuse_and_pinuse(ms, p, small_index2size(i));
1307  } else {
1308  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
1309  r = chunk_plus_offset(p, nb);
1310  set_size_and_pinuse_of_free_chunk(r, rsize);
1311  replace_dv(ms, r, rsize);
1312  }
1313  mem = chunk2mem(p);
1314  return mem;
1315  } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
1316  return mem;
1317  }
1318  }
1319  } else if (nsize >= MAX_REQUEST) {
1320  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
1321  } else {
1322  nb = pad_request(nsize);
1323  if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
1324  return mem;
1325  }
1326  }
1327 
1328  if (nb <= ms->dvsize) {
1329  size_t rsize = ms->dvsize - nb;
1330  mchunkptr p = ms->dv;
1331  if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
1332  mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
1333  ms->dvsize = rsize;
1334  set_size_and_pinuse_of_free_chunk(r, rsize);
1335  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
1336  } else { /* exhaust dv */
1337  size_t dvs = ms->dvsize;
1338  ms->dvsize = 0;
1339  ms->dv = 0;
1340  set_inuse_and_pinuse(ms, p, dvs);
1341  }
1342  mem = chunk2mem(p);
1343  return mem;
1344  } else if (nb < ms->topsize) { /* Split top */
1345  size_t rsize = ms->topsize -= nb;
1346  mchunkptr p = ms->top;
1347  mchunkptr r = ms->top = chunk_plus_offset(p, nb);
1348  r->head = rsize | PINUSE_BIT;
1349  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
1350  mem = chunk2mem(p);
1351  return mem;
1352  }
1353  return alloc_sys(ms, nb);
1354 }
1355 
1356 static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
1357 {
1358  if (ptr != 0) {
1359  mchunkptr p = mem2chunk(ptr);
1360  mstate fm = (mstate)msp;
1361  size_t psize = chunksize(p);
1362  mchunkptr next = chunk_plus_offset(p, psize);
1363  if (!pinuse(p)) {
1364  size_t prevsize = p->prev_foot;
1365  if ((prevsize & IS_DIRECT_BIT) != 0) {
1366  prevsize &= ~IS_DIRECT_BIT;
1367  psize += prevsize + DIRECT_FOOT_PAD;
1368  CALL_MUNMAP((char *)p - prevsize, psize);
1369  return NULL;
1370  } else {
1371  mchunkptr prev = chunk_minus_offset(p, prevsize);
1372  psize += prevsize;
1373  p = prev;
1374  /* consolidate backward */
1375  if (p != fm->dv) {
1376  unlink_chunk(fm, p, prevsize);
1377  } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
1378  fm->dvsize = psize;
1379  set_free_with_pinuse(p, psize, next);
1380  return NULL;
1381  }
1382  }
1383  }
1384  if (!cinuse(next)) { /* consolidate forward */
1385  if (next == fm->top) {
1386  size_t tsize = fm->topsize += psize;
1387  fm->top = p;
1388  p->head = tsize | PINUSE_BIT;
1389  if (p == fm->dv) {
1390  fm->dv = 0;
1391  fm->dvsize = 0;
1392  }
1393  if (tsize > fm->trim_check)
1394  alloc_trim(fm, 0);
1395  return NULL;
1396  } else if (next == fm->dv) {
1397  size_t dsize = fm->dvsize += psize;
1398  fm->dv = p;
1399  set_size_and_pinuse_of_free_chunk(p, dsize);
1400  return NULL;
1401  } else {
1402  size_t nsize = chunksize(next);
1403  psize += nsize;
1404  unlink_chunk(fm, next, nsize);
1405  set_size_and_pinuse_of_free_chunk(p, psize);
1406  if (p == fm->dv) {
1407  fm->dvsize = psize;
1408  return NULL;
1409  }
1410  }
1411  } else {
1412  set_free_with_pinuse(p, psize, next);
1413  }
1414 
1415  if (is_small(psize)) {
1416  insert_small_chunk(fm, p, psize);
1417  } else {
1418  tchunkptr tp = (tchunkptr)p;
1419  insert_large_chunk(fm, tp, psize);
1420  if (--fm->release_checks == 0)
1421  release_unused_segments(fm);
1422  }
1423  }
1424  return NULL;
1425 }
1426 
1427 static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
1428 {
1429  if (nsize >= MAX_REQUEST) {
1430  return NULL;
1431  } else {
1432  mstate m = (mstate)msp;
1433  mchunkptr oldp = mem2chunk(ptr);
1434  size_t oldsize = chunksize(oldp);
1435  mchunkptr next = chunk_plus_offset(oldp, oldsize);
1436  mchunkptr newp = 0;
1437  size_t nb = request2size(nsize);
1438 
1439  /* Try to either shrink or extend into top. Else malloc-copy-free */
1440  if (is_direct(oldp)) {
1441  newp = direct_resize(oldp, nb); /* this may return NULL. */
1442  } else if (oldsize >= nb) { /* already big enough */
1443  size_t rsize = oldsize - nb;
1444  newp = oldp;
1445  if (rsize >= MIN_CHUNK_SIZE) {
1446  mchunkptr rem = chunk_plus_offset(newp, nb);
1447  set_inuse(m, newp, nb);
1448  set_inuse(m, rem, rsize);
1449  lj_alloc_free(m, chunk2mem(rem));
1450  }
1451  } else if (next == m->top && oldsize + m->topsize > nb) {
1452  /* Expand into top */
1453  size_t newsize = oldsize + m->topsize;
1454  size_t newtopsize = newsize - nb;
1455  mchunkptr newtop = chunk_plus_offset(oldp, nb);
1456  set_inuse(m, oldp, nb);
1457  newtop->head = newtopsize |PINUSE_BIT;
1458  m->top = newtop;
1459  m->topsize = newtopsize;
1460  newp = oldp;
1461  }
1462 
1463  if (newp != 0) {
1464  return chunk2mem(newp);
1465  } else {
1466  void *newmem = lj_alloc_malloc(m, nsize);
1467  if (newmem != 0) {
1468  size_t oc = oldsize - overhead_for(oldp);
1469  memcpy(newmem, ptr, oc < nsize ? oc : nsize);
1470  lj_alloc_free(m, ptr);
1471  }
1472  return newmem;
1473  }
1474  }
1475 }
1476 
1477 void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
1478 {
1479  (void)osize;
1480  if (nsize == 0) {
1481  return lj_alloc_free(msp, ptr);
1482  } else if (ptr == NULL) {
1483  return lj_alloc_malloc(msp, nsize);
1484  } else {
1485  return lj_alloc_realloc(msp, ptr, nsize);
1486  }
1487 }
1488 
1489 #endif
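
The three public entry points follow the Lua allocator protocol: lj_alloc_f() dispatches on its arguments, so a single callback covers malloc, realloc, and free. A minimal usage sketch (not part of the file; error handling elided):

  void *msp = lj_alloc_create();            /* one allocator per Lua state */
  if (msp) {
    void *p = lj_alloc_f(msp, NULL, 0, 64); /* ptr == NULL: allocate 64 bytes */
    p = lj_alloc_f(msp, p, 64, 256);        /* nsize != 0: grow to 256 bytes */
    lj_alloc_f(msp, p, 256, 0);             /* nsize == 0: free */
    lj_alloc_destroy(msp);                  /* unmap all segments */
  }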