CUBRID Engine  latest
malloc_2_8_3.c
Go to the documentation of this file.
1 /*
2  * Copyright 2008 Search Solution Corporation
3  * Copyright 2016 CUBRID Corporation
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 /*
20  * Following is Doug Lea's memory allocator with USE_MALLOC_INSTEAD
21  * feature amendment
22  *
23  * USE_MALLOC_INSTEAD
24  * When this feature is enabled, uses system malloc/free instead of mmap/munmap.
25  *
26  * ENABLE_SEPARATE_MMAP_EVENT_TRACE
27  * Fix the problem that mmaped (malloced when USE_MALLOC_INSTEAD is 1)
28  * memory region (which is returned by mmap_alloc function) is not
29  * automatically freed when destroy_mspace is called.
30  *
31  */
32 
33 /*===========================================================================*/
34 /*===========================================================================*/
35 /*===========================================================================*/
36 
37 /*
38  This is a version (aka dlmalloc) of malloc/free/realloc written by
39  Doug Lea and released to the public domain, as explained at
40  http://creativecommons.org/licenses/publicdomain. Send questions,
41  comments, complaints, performance data, etc to dl@cs.oswego.edu
42 
43 * Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
44 
45  Note: There may be an updated version of this malloc obtainable at
46  ftp://gee.cs.oswego.edu/pub/misc/malloc.c
47  Check before installing!
48 
49 * Quickstart
50 
51  This library is all in one file to simplify the most common usage:
52  ftp it, compile it (-O3), and link it into another program. All of
53  the compile-time options default to reasonable values for use on
54  most platforms. You might later want to step through various
55  compile-time and dynamic tuning options.
56 
57  For convenience, an include file for code using this malloc is at:
58  ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
59  You don't really need this .h file unless you call functions not
60  defined in your system include files. The .h file contains only the
61  excerpts from this file needed for using this malloc on ANSI C/C++
62  systems, so long as you haven't changed compile-time options about
63  naming and tuning parameters. If you do, then you can create your
64  own malloc.h that does include all settings by cutting at the point
65  indicated below. Note that you may already by default be using a C
66  library containing a malloc that is based on some version of this
67  malloc (for example in linux). You might still want to use the one
68  in this file to customize settings or to avoid overheads associated
69  with library versions.
70 
71 * Vital statistics:
72 
73  Supported pointer/size_t representation: 4 or 8 bytes
74  size_t MUST be an unsigned type of the same width as
75  pointers. (If you are using an ancient system that declares
76  size_t as a signed type, or need it to be a different width
77  than pointers, you can use a previous release of this malloc
78  (e.g. 2.7.2) supporting these.)
79 
80  Alignment: 8 bytes (default)
81  This suffices for nearly all current machines and C compilers.
82  However, you can define MALLOC_ALIGNMENT to be wider than this
83  if necessary (up to 128bytes), at the expense of using more space.
84 
85  Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes)
86  8 or 16 bytes (if 8byte sizes)
87  Each malloced chunk has a hidden word of overhead holding size
88  and status information, and additional cross-check word
89  if FOOTERS is defined.
90 
91  Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
92  8-byte ptrs: 32 bytes (including overhead)
93 
94  Even a request for zero bytes (i.e., malloc(0)) returns a
95  pointer to something of the minimum allocatable size.
96  The maximum overhead wastage (i.e., number of extra bytes
97  allocated than were requested in malloc) is less than or equal
98  to the minimum size, except for requests >= mmap_threshold that
99  are serviced via mmap(), where the worst case wastage is about
100  32 bytes plus the remainder from a system page (the minimal
101  mmap unit); typically 4096 or 8192 bytes.
102 
103  Security: static-safe; optionally more or less
104  The "security" of malloc refers to the ability of malicious
105  code to accentuate the effects of errors (for example, freeing
106  space that is not currently malloc'ed or overwriting past the
107  ends of chunks) in code that calls malloc. This malloc
108  guarantees not to modify any memory locations below the base of
109  heap, i.e., static variables, even in the presence of usage
110  errors. The routines additionally detect most improper frees
111  and reallocs. All this holds as long as the static bookkeeping
112  for malloc itself is not corrupted by some other means. This
113  is only one aspect of security -- these checks do not, and
114  cannot, detect all possible programming errors.
115 
116  If FOOTERS is defined nonzero, then each allocated chunk
117  carries an additional check word to verify that it was malloced
118  from its space. These check words are the same within each
119  execution of a program using malloc, but differ across
120  executions, so externally crafted fake chunks cannot be
121  freed. This improves security by rejecting frees/reallocs that
122  could corrupt heap memory, in addition to the checks preventing
123  writes to statics that are always on. This may further improve
124  security at the expense of time and space overhead. (Note that
125  FOOTERS may also be worth using with MSPACES.)
126 
127  By default detected errors cause the program to abort (calling
128  "abort()"). You can override this to instead proceed past
129  errors by defining PROCEED_ON_ERROR. In this case, a bad free
130  has no effect, and a malloc that encounters a bad address
131  caused by user overwrites will ignore the bad address by
132  dropping pointers and indices to all known memory. This may
133  be appropriate for programs that should continue if at all
134  possible in the face of programming errors, although they may
135  run out of memory because dropped memory is never reclaimed.
136 
137  If you don't like either of these options, you can define
138  CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
139  else. And if if you are sure that your program using malloc has
140  no errors or vulnerabilities, you can define INSECURE to 1,
141  which might (or might not) provide a small performance improvement.
142 
143  Thread-safety: NOT thread-safe unless USE_LOCKS defined
144  When USE_LOCKS is defined, each public call to malloc, free,
145  etc is surrounded with either a pthread mutex or a win32
146  spinlock (depending on WIN32). This is not especially fast, and
147  can be a major bottleneck. It is designed only to provide
148  minimal protection in concurrent environments, and to provide a
149  basis for extensions. If you are using malloc in a concurrent
150  program, consider instead using ptmalloc, which is derived from
151  a version of this malloc. (See http://www.malloc.de).
152 
153  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
154  This malloc can use unix sbrk or any emulation (invoked using
155  the CALL_MORECORE macro) and/or mmap/munmap or any emulation
156  (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
157  memory. On most unix systems, it tends to work best if both
158  MORECORE and MMAP are enabled. On Win32, it uses emulations
159  based on VirtualAlloc. It also uses common C library functions
160  like memset.
161 
162  Compliance: I believe it is compliant with the Single Unix Specification
163  (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
164  others as well.
165 
166 * Overview of algorithms
167 
168  This is not the fastest, most space-conserving, most portable, or
169  most tunable malloc ever written. However it is among the fastest
170  while also being among the most space-conserving, portable and
171  tunable. Consistent balance across these factors results in a good
172  general-purpose allocator for malloc-intensive programs.
173 
174  In most ways, this malloc is a best-fit allocator. Generally, it
175  chooses the best-fitting existing chunk for a request, with ties
176  broken in approximately least-recently-used order. (This strategy
177  normally maintains low fragmentation.) However, for requests less
178  than 256bytes, it deviates from best-fit when there is not an
179  exactly fitting available chunk by preferring to use space adjacent
180  to that used for the previous small request, as well as by breaking
181  ties in approximately most-recently-used order. (These enhance
182  locality of series of small allocations.) And for very large requests
183  (>= 256Kb by default), it relies on system memory mapping
184  facilities, if supported. (This helps avoid carrying around and
185  possibly fragmenting memory used only for large chunks.)
186 
187  All operations (except malloc_stats and mallinfo) have execution
188  times that are bounded by a constant factor of the number of bits in
189  a size_t, not counting any clearing in calloc or copying in realloc,
190  or actions surrounding MORECORE and MMAP that have times
191  proportional to the number of non-contiguous regions returned by
192  system allocation routines, which is often just 1.
193 
194  The implementation is not very modular and seriously overuses
195  macros. Perhaps someday all C compilers will do as good a job
196  inlining modular code as can now be done by brute-force expansion,
197  but now, enough of them seem not to.
198 
199  Some compilers issue a lot of warnings about code that is
200  dead/unreachable only on some platforms, and also about intentional
201  uses of negation on unsigned types. All known cases of each can be
202  ignored.
203 
204  For a longer but out of date high-level description, see
205  http://gee.cs.oswego.edu/dl/html/malloc.html
206 
207 * MSPACES
208  If MSPACES is defined, then in addition to malloc, free, etc.,
209  this file also defines mspace_malloc, mspace_free, etc. These
210  are versions of malloc routines that take an "mspace" argument
211  obtained using create_mspace, to control all internal bookkeeping.
212  If ONLY_MSPACES is defined, only these versions are compiled.
213  So if you would like to use this allocator for only some allocations,
214  and your system malloc for others, you can compile with
215  ONLY_MSPACES and then do something like...
216  static mspace mymspace = create_mspace(0,0); // for example
217  #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
218 
219  (Note: If you only need one instance of an mspace, you can instead
220  use "USE_DL_PREFIX" to relabel the global malloc.)
221 
222  You can similarly create thread-local allocators by storing
223  mspaces as thread-locals. For example:
224  static __thread mspace tlms = 0;
225  void* tlmalloc(size_t bytes) {
226  if (tlms == 0) tlms = create_mspace(0, 0);
227  return mspace_malloc(tlms, bytes);
228  }
229  void tlfree(void* mem) { mspace_free(tlms, mem); }
230 
231  Unless FOOTERS is defined, each mspace is completely independent.
232  You cannot allocate from one and free to another (although
233  conformance is only weakly checked, so usage errors are not always
234  caught). If FOOTERS is defined, then each chunk carries around a tag
235  indicating its originating mspace, and frees are directed to their
236  originating spaces.
237 
238  ------------------------- Compile-time options ---------------------------
239 
240 Be careful in setting #define values for numerical constants of type
241 size_t. On some systems, literal values are not automatically extended
242 to size_t precision unless they are explicitly casted.
243 
244 WIN32 default: defined if _WIN32 defined
245  Defining WIN32 sets up defaults for MS environment and compilers.
246  Otherwise defaults are for unix.
247 
248 MALLOC_ALIGNMENT default: (size_t)8
249  Controls the minimum alignment for malloc'ed chunks. It must be a
250  power of two and at least 8, even on machines for which smaller
251  alignments would suffice. It may be defined as larger than this
252  though. Note however that code and data structures are optimized for
253  the case of 8-byte alignment.
254 
255 MSPACES default: 0 (false)
256  If true, compile in support for independent allocation spaces.
257  This is only supported if HAVE_MMAP is true.
258 
259 ONLY_MSPACES default: 0 (false)
260  If true, only compile in mspace versions, not regular versions.
261 
262 USE_LOCKS default: 0 (false)
263  Causes each call to each public routine to be surrounded with
264  pthread or WIN32 mutex lock/unlock. (If set true, this can be
265  overridden on a per-mspace basis for mspace versions.)
266 
267 FOOTERS default: 0
268  If true, provide extra checking and dispatching by placing
269  information in the footers of allocated chunks. This adds
270  space and time overhead.
271 
272 INSECURE default: 0
273  If true, omit checks for usage errors and heap space overwrites.
274 
275 USE_DL_PREFIX default: NOT defined
276  Causes compiler to prefix all public routines with the string 'dl'.
277  This can be useful when you only want to use this malloc in one part
278  of a program, using your regular system malloc elsewhere.
279 
280 ABORT default: defined as abort()
281  Defines how to abort on failed checks. On most systems, a failed
282  check cannot die with an "assert" or even print an informative
283  message, because the underlying print routines in turn call malloc,
284  which will fail again. Generally, the best policy is to simply call
285  abort(). It's not very useful to do more than this because many
286  errors due to overwriting will show up as address faults (null, odd
287  addresses etc) rather than malloc-triggered checks, so will also
288  abort. Also, most compilers know that abort() does not return, so
289  can better optimize code conditionally calling it.
290 
291 PROCEED_ON_ERROR default: defined as 0 (false)
292  Controls whether detected bad addresses cause them to bypassed
293  rather than aborting. If set, detected bad arguments to free and
294  realloc are ignored. And all bookkeeping information is zeroed out
295  upon a detected overwrite of freed heap space, thus losing the
296  ability to ever return it from malloc again, but enabling the
297  application to proceed. If PROCEED_ON_ERROR is defined, the
298  static variable malloc_corruption_error_count is compiled in
299  and can be examined to see if errors have occurred. This option
300  generates slower code than the default abort policy.
301 
302 DEBUG default: NOT defined
303  The DEBUG setting is mainly intended for people trying to modify
304  this code or diagnose problems when porting to new platforms.
305  However, it may also be able to better isolate user errors than just
306  using runtime checks. The assertions in the check routines spell
307  out in more detail the assumptions and invariants underlying the
308  algorithms. The checking is fairly extensive, and will slow down
309  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
310  set will attempt to check every non-mmapped allocated and free chunk
311  in the course of computing the summaries.
312 
313 ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
314  Debugging assertion failures can be nearly impossible if your
315  version of the assert macro causes malloc to be called, which will
316  lead to a cascade of further failures, blowing the runtime stack.
317  ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(),
318  which will usually make debugging easier.
319 
320 MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
321  The action to take before "return 0" when malloc fails to be able to
322  return memory because there is none available.
323 
324 HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
325  True if this system supports sbrk or an emulation of it.
326 
327 MORECORE default: sbrk
328  The name of the sbrk-style system routine to call to obtain more
329  memory. See below for guidance on writing custom MORECORE
330  functions. The type of the argument to sbrk/MORECORE varies across
331  systems. It cannot be size_t, because it supports negative
332  arguments, so it is normally the signed type of the same width as
333  size_t (sometimes declared as "intptr_t"). It doesn't much matter
334  though. Internally, we only call it with arguments less than half
335  the max value of a size_t, which should work across all reasonable
336  possibilities, although sometimes generating compiler warnings. See
337  near the end of this file for guidelines for creating a custom
338  version of MORECORE.
339 
340 MORECORE_CONTIGUOUS default: 1 (true)
341  If true, take advantage of fact that consecutive calls to MORECORE
342  with positive arguments always return contiguous increasing
343  addresses. This is true of unix sbrk. It does not hurt too much to
344  set it true anyway, since malloc copes with non-contiguities.
345  Setting it false when definitely non-contiguous saves time
346  and possibly wasted space it would take to discover this though.
347 
348 MORECORE_CANNOT_TRIM default: NOT defined
349  True if MORECORE cannot release space back to the system when given
350  negative arguments. This is generally necessary only if you are
351  using a hand-crafted MORECORE function that cannot handle negative
352  arguments.
353 
354 HAVE_MMAP default: 1 (true)
355  True if this system supports mmap or an emulation of it. If so, and
356  HAVE_MORECORE is not true, MMAP is used for all system
357  allocation. If set and HAVE_MORECORE is true as well, MMAP is
358  primarily used to directly allocate very large blocks. It is also
359  used as a backup strategy in cases where MORECORE fails to provide
360  space from system. Note: A single call to MUNMAP is assumed to be
361  able to unmap memory that may have be allocated using multiple calls
362  to MMAP, so long as they are adjacent.
363 
364 HAVE_MREMAP default: 1 on linux, else 0
365  If true realloc() uses mremap() to re-allocate large blocks and
366  extend or shrink allocation spaces.
367 
368 MMAP_CLEARS default: 1 on unix
369  True if mmap clears memory so calloc doesn't need to. This is true
370  for standard unix mmap using /dev/zero.
371 
372 USE_BUILTIN_FFS default: 0 (i.e., not used)
373  Causes malloc to use the builtin ffs() function to compute indices.
374  Some compilers may recognize and intrinsify ffs to be faster than the
375  supplied C version. Also, the case of x86 using gcc is special-cased
376  to an asm instruction, so is already as fast as it can be, and so
377  this setting has no effect. (On most x86s, the asm version is only
378  slightly faster than the C version.)
379 
380 malloc_getpagesize default: derive from system includes, or 4096.
381  The system page size. To the extent possible, this malloc manages
382  memory from the system in page-size units. This may be (and
383  usually is) a function rather than a constant. This is ignored
384  if WIN32, where page size is determined using getSystemInfo during
385  initialization.
386 
387 USE_DEV_RANDOM default: 0 (i.e., not used)
388  Causes malloc to use /dev/random to initialize secure magic seed for
389  stamping footers. Otherwise, the current time is used.
390 
391 NO_MALLINFO default: 0
392  If defined, don't compile "mallinfo". This can be a simple way
393  of dealing with mismatches between system declarations and
394  those in this file.
395 
396 MALLINFO_FIELD_TYPE default: size_t
397  The type of the fields in the mallinfo struct. This was originally
398  defined as "int" in SVID etc, but is more usefully defined as
399  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set
400 
401 REALLOC_ZERO_BYTES_FREES default: not defined
402  This should be set if a call to realloc with zero bytes should
403  be the same as a call to free. Some people think it should. Otherwise,
404  since this malloc returns a unique pointer for malloc(0), so does
405  realloc(p, 0).
406 
407 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
408 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
409 LACKS_STDLIB_H default: NOT defined unless on WIN32
410  Define these if your system does not have these header files.
411  You might need to manually insert some of the declarations they provide.
412 
413 DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
414  system_info.dwAllocationGranularity in WIN32,
415  otherwise 64K.
416  Also settable using mallopt(M_GRANULARITY, x)
417  The unit for allocating and deallocating memory from the system. On
418  most systems with contiguous MORECORE, there is no reason to
419  make this more than a page. However, systems with MMAP tend to
420  either require or encourage larger granularities. You can increase
421  this value to prevent system allocation functions to be called so
422  often, especially if they are slow. The value must be at least one
423  page and must be a power of two. Setting to 0 causes initialization
424  to either page size or win32 region size. (Note: In previous
425  versions of malloc, the equivalent of this option was called
426  "TOP_PAD")
427 
428 DEFAULT_TRIM_THRESHOLD default: 2MB
429  Also settable using mallopt(M_TRIM_THRESHOLD, x)
430  The maximum amount of unused top-most memory to keep before
431  releasing via malloc_trim in free(). Automatic trimming is mainly
432  useful in long-lived programs using contiguous MORECORE. Because
433  trimming via sbrk can be slow on some systems, and can sometimes be
434  wasteful (in cases where programs immediately afterward allocate
435  more large chunks) the value should be high enough so that your
436  overall system performance would improve by releasing this much
437  memory. As a rough guide, you might set to a value close to the
438  average size of a process (program) running on your system.
439  Releasing this much memory would allow such a process to run in
440  memory. Generally, it is worth tuning trim thresholds when a
441  program undergoes phases where several large chunks are allocated
442  and released in ways that can reuse each other's storage, perhaps
443  mixed with phases where there are no such chunks at all. The trim
444  value must be greater than page size to have any useful effect. To
445  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
446  some people use of mallocing a huge space and then freeing it at
447  program startup, in an attempt to reserve system memory, doesn't
448  have the intended effect under automatic trimming, since that memory
449  will immediately be returned to the system.
450 
451 DEFAULT_MMAP_THRESHOLD default: 256K
452  Also settable using mallopt(M_MMAP_THRESHOLD, x)
453  The request size threshold for using MMAP to directly service a
454  request. Requests of at least this size that cannot be allocated
455  using already-existing space will be serviced via mmap. (If enough
456  normal freed space already exists it is used instead.) Using mmap
457  segregates relatively large chunks of memory so that they can be
458  individually obtained and released from the host system. A request
459  serviced through mmap is never reused by any other request (at least
460  not directly; the system may just so happen to remap successive
461  requests to the same locations). Segregating space in this way has
462  the benefits that: Mmapped space can always be individually released
463  back to the system, which helps keep the system level memory demands
464  of a long-lived program low. Also, mapped memory doesn't become
465  `locked' between other chunks, as can happen with normally allocated
466  chunks, which means that even trimming via malloc_trim would not
467  release them. However, it has the disadvantage that the space
468  cannot be reclaimed, consolidated, and then used to service later
469  requests, as happens with normal chunks. The advantages of mmap
470  nearly always outweigh disadvantages for "large" chunks, but the
471  value of "large" may vary across systems. The default is an
472  empirically derived value that works well in most systems. You can
473  disable mmap by setting to MAX_SIZE_T.
474 
475 */
476 
477 #ifndef WIN32
478 #ifdef _WIN32
479 #define WIN32 1
480 #endif /* _WIN32 */
481 #endif /* WIN32 */
482 #ifdef WIN32
483 #define WIN32_LEAN_AND_MEAN
484 #include <windows.h>
485 #define HAVE_MMAP 1
486 #define HAVE_MORECORE 0
487 #define LACKS_UNISTD_H
488 #define LACKS_SYS_PARAM_H
489 #define LACKS_SYS_MMAN_H
490 #define LACKS_STRING_H
491 #define LACKS_STRINGS_H
492 #define LACKS_SYS_TYPES_H
493 #define LACKS_ERRNO_H
494 #define MALLOC_FAILURE_ACTION
495 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
496 #endif /* WIN32 */
497 
498 #if defined(DARWIN) || defined(_DARWIN)
499 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
500 #ifndef HAVE_MORECORE
501 #define HAVE_MORECORE 0
502 #define HAVE_MMAP 1
503 #endif /* HAVE_MORECORE */
504 #endif /* DARWIN */
505 
506 #ifndef LACKS_SYS_TYPES_H
507 #include <sys/types.h> /* For size_t */
508 #endif /* LACKS_SYS_TYPES_H */
509 
510 /* The maximum possible size_t value has all bits set */
511 #define MAX_SIZE_T (~(size_t)0)
512 
513 #ifndef ONLY_MSPACES
514 #define ONLY_MSPACES 0
515 #endif /* ONLY_MSPACES */
516 #ifndef MSPACES
517 #if ONLY_MSPACES
518 #define MSPACES 1
519 #else /* ONLY_MSPACES */
520 #define MSPACES 0
521 #endif /* ONLY_MSPACES */
522 #endif /* MSPACES */
523 #ifndef MALLOC_ALIGNMENT
524 #define MALLOC_ALIGNMENT ((size_t)8U)
525 #endif /* MALLOC_ALIGNMENT */
526 #ifndef FOOTERS
527 #define FOOTERS 0
528 #endif /* FOOTERS */
529 #ifndef ABORT
530 #define ABORT abort()
531 #endif /* ABORT */
532 #ifndef ABORT_ON_ASSERT_FAILURE
533 #define ABORT_ON_ASSERT_FAILURE 1
534 #endif /* ABORT_ON_ASSERT_FAILURE */
535 #ifndef PROCEED_ON_ERROR
536 #define PROCEED_ON_ERROR 0
537 #endif /* PROCEED_ON_ERROR */
538 #ifndef USE_LOCKS
539 #define USE_LOCKS 0
540 #endif /* USE_LOCKS */
541 #ifndef INSECURE
542 #define INSECURE 0
543 #endif /* INSECURE */
544 #ifndef HAVE_MMAP
545 #define HAVE_MMAP 1
546 #endif /* HAVE_MMAP */
547 #ifndef MMAP_CLEARS
548 #define MMAP_CLEARS 1
549 #endif /* MMAP_CLEARS */
550 #ifndef HAVE_MREMAP
551 #ifdef linux
552 #define HAVE_MREMAP 1
553 #else /* linux */
554 #define HAVE_MREMAP 0
555 #endif /* linux */
556 #endif /* HAVE_MREMAP */
557 #ifndef MALLOC_FAILURE_ACTION
558 #define MALLOC_FAILURE_ACTION errno = ENOMEM;
559 #endif /* MALLOC_FAILURE_ACTION */
560 #ifndef HAVE_MORECORE
561 #if ONLY_MSPACES
562 #define HAVE_MORECORE 0
563 #else /* ONLY_MSPACES */
564 #define HAVE_MORECORE 1
565 #endif /* ONLY_MSPACES */
566 #endif /* HAVE_MORECORE */
567 #if !HAVE_MORECORE
568 #define MORECORE_CONTIGUOUS 0
569 #else /* !HAVE_MORECORE */
570 #ifndef MORECORE
571 #define MORECORE sbrk
572 #endif /* MORECORE */
573 #ifndef MORECORE_CONTIGUOUS
574 #define MORECORE_CONTIGUOUS 1
575 #endif /* MORECORE_CONTIGUOUS */
576 #endif /* HAVE_MORECORE */
577 #ifndef DEFAULT_GRANULARITY
578 #if MORECORE_CONTIGUOUS
579 #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
580 #else /* MORECORE_CONTIGUOUS */
581 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
582 #endif /* MORECORE_CONTIGUOUS */
583 #endif /* DEFAULT_GRANULARITY */
584 #ifndef DEFAULT_TRIM_THRESHOLD
585 #ifndef MORECORE_CANNOT_TRIM
586 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
587 #else /* MORECORE_CANNOT_TRIM */
588 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
589 #endif /* MORECORE_CANNOT_TRIM */
590 #endif /* DEFAULT_TRIM_THRESHOLD */
591 #ifndef DEFAULT_MMAP_THRESHOLD
592 #if HAVE_MMAP
593 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
594 #else /* HAVE_MMAP */
595 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
596 #endif /* HAVE_MMAP */
597 #endif /* DEFAULT_MMAP_THRESHOLD */
598 #ifndef USE_BUILTIN_FFS
599 #define USE_BUILTIN_FFS 0
600 #endif /* USE_BUILTIN_FFS */
601 #ifndef USE_DEV_RANDOM
602 #define USE_DEV_RANDOM 0
603 #endif /* USE_DEV_RANDOM */
604 #ifndef NO_MALLINFO
605 #define NO_MALLINFO 0
606 #endif /* NO_MALLINFO */
607 #ifndef MALLINFO_FIELD_TYPE
608 #define MALLINFO_FIELD_TYPE size_t
609 #endif /* MALLINFO_FIELD_TYPE */
610 
611 /*
612  mallopt tuning options. SVID/XPG defines four standard parameter
613  numbers for mallopt, normally defined in malloc.h. None of these
614  are used in this malloc, so setting them has no effect. But this
615  malloc does support the following options.
616 */
617 
618 #define M_TRIM_THRESHOLD (-1)
619 #define M_GRANULARITY (-2)
620 #define M_MMAP_THRESHOLD (-3)
621 
622 /* ------------------------ Mallinfo declarations ------------------------ */
623 
624 #if !NO_MALLINFO
625 /*
626  This version of malloc supports the standard SVID/XPG mallinfo
627  routine that returns a struct containing usage properties and
628  statistics. It should work on any system that has a
629  /usr/include/malloc.h defining struct mallinfo. The main
630  declaration needed is the mallinfo struct that is returned (by-copy)
631  by mallinfo(). The malloinfo struct contains a bunch of fields that
632  are not even meaningful in this version of malloc. These fields are
633  are instead filled by mallinfo() with other numbers that might be of
634  interest.
635 
636  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
637  /usr/include/malloc.h file that includes a declaration of struct
638  mallinfo. If so, it is included; else a compliant version is
639  declared below. These must be precisely the same for mallinfo() to
640  work. The original SVID version of this struct, defined on most
641  systems with mallinfo, declares all fields as ints. But some others
642  define as unsigned long. If your system defines the fields using a
643  type of different width than listed here, you MUST #include your
644  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
645 */
646 
647 /* #define HAVE_USR_INCLUDE_MALLOC_H */
648 
649 #ifdef HAVE_USR_INCLUDE_MALLOC_H
650 #include "/usr/include/malloc.h"
651 #else /* HAVE_USR_INCLUDE_MALLOC_H */
652 
653 struct mallinfo
654 {
655  MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
656  MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
657  MALLINFO_FIELD_TYPE smblks; /* always 0 */
658  MALLINFO_FIELD_TYPE hblks; /* always 0 */
659  MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
660  MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
661  MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
662  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
663  MALLINFO_FIELD_TYPE fordblks; /* total free space */
664  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
665 };
666 
667 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
668 #endif /* NO_MALLINFO */
669 
670 #ifdef __cplusplus
671 extern "C"
672 {
673 #endif /* __cplusplus */
674 
675 #if !ONLY_MSPACES
676 
677 /* ------------------- Declarations of public routines ------------------- */
678 
679 #ifndef USE_DL_PREFIX
680 #define dlcalloc calloc
681 #define dlfree free
682 #define dlmalloc malloc
683 #define dlmemalign memalign
684 #define dlrealloc realloc
685 #define dlvalloc valloc
686 #define dlpvalloc pvalloc
687 #define dlmallinfo mallinfo
688 #define dlmallopt mallopt
689 #define dlmalloc_trim malloc_trim
690 #define dlmalloc_stats malloc_stats
691 #define dlmalloc_usable_size malloc_usable_size
692 #define dlmalloc_footprint malloc_footprint
693 #define dlmalloc_max_footprint malloc_max_footprint
694 #define dlindependent_calloc independent_calloc
695 #define dlindependent_comalloc independent_comalloc
696 #endif /* USE_DL_PREFIX */
697 
698 
699 /*
700  malloc(size_t n)
701  Returns a pointer to a newly allocated chunk of at least n bytes, or
702  null if no space is available, in which case errno is set to ENOMEM
703  on ANSI C systems.
704 
705  If n is zero, malloc returns a minimum-sized chunk. (The minimum
706  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
707  systems.) Note that size_t is an unsigned type, so calls with
708  arguments that would be negative if signed are interpreted as
709  requests for huge amounts of space, which will often fail. The
710  maximum supported value of n differs across systems, but is in all
711  cases less than the maximum representable value of a size_t.
712 */
713  void *dlmalloc (size_t);
714 
715 /*
716  free(void* p)
717  Releases the chunk of memory pointed to by p, that had been previously
718  allocated using malloc or a related routine such as realloc.
719  It has no effect if p is null. If p was not malloced or already
720  freed, free(p) will by default cause the current program to abort.
721 */
722  void dlfree (void *);
723 
724 /*
725  calloc(size_t n_elements, size_t element_size);
726  Returns a pointer to n_elements * element_size bytes, with all locations
727  set to zero.
728 */
729  void *dlcalloc (size_t, size_t);
730 
731 /*
732  realloc(void* p, size_t n)
733  Returns a pointer to a chunk of size n that contains the same data
734  as does chunk p up to the minimum of (n, p's size) bytes, or null
735  if no space is available.
736 
737  The returned pointer may or may not be the same as p. The algorithm
738  prefers extending p in most cases when possible, otherwise it
739  employs the equivalent of a malloc-copy-free sequence.
740 
741  If p is null, realloc is equivalent to malloc.
742 
743  If space is not available, realloc returns null, errno is set (if on
744  ANSI) and p is NOT freed.
745 
746  if n is for fewer bytes than already held by p, the newly unused
747  space is lopped off and freed if possible. realloc with a size
748  argument of zero (re)allocates a minimum-sized chunk.
749 
750  The old unix realloc convention of allowing the last-free'd chunk
751  to be used as an argument to realloc is not supported.
752 */
753 
754  void *dlrealloc (void *, size_t);
755 
756 /*
757  memalign(size_t alignment, size_t n);
758  Returns a pointer to a newly allocated chunk of n bytes, aligned
759  in accord with the alignment argument.
760 
761  The alignment argument should be a power of two. If the argument is
762  not a power of two, the nearest greater power is used.
763  8-byte alignment is guaranteed by normal malloc calls, so don't
764  bother calling memalign with an argument of 8 or less.
765 
766  Overreliance on memalign is a sure way to fragment space.
767 */
768  void *dlmemalign (size_t, size_t);
769 
770 /*
771  valloc(size_t n);
772  Equivalent to memalign(pagesize, n), where pagesize is the page
773  size of the system. If the pagesize is unknown, 4096 is used.
774 */
775  void *dlvalloc (size_t);
776 
777 /*
778  mallopt(int parameter_number, int parameter_value)
779  Sets tunable parameters The format is to provide a
780  (parameter-number, parameter-value) pair. mallopt then sets the
781  corresponding parameter to the argument value if it can (i.e., so
782  long as the value is meaningful), and returns 1 if successful else
783  0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
784  normally defined in malloc.h. None of these are use in this malloc,
785  so setting them has no effect. But this malloc also supports other
786  options in mallopt. See below for details. Briefly, supported
787  parameters are as follows (listed defaults are for "typical"
788  configurations).
789 
790  Symbol param # default allowed param values
791  M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables)
792  M_GRANULARITY -2 page size any power of 2 >= page size
793  M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
794 */
795  int dlmallopt (int, int);
796 
797 /*
798  malloc_footprint();
799  Returns the number of bytes obtained from the system. The total
800  number of bytes allocated by malloc, realloc etc., is less than this
801  value. Unlike mallinfo, this function returns only a precomputed
802  result, so can be called frequently to monitor memory consumption.
803  Even if locks are otherwise defined, this function does not use them,
804  so results might not be up to date.
805 */
806  size_t dlmalloc_footprint (void);
807 
808 /*
809  malloc_max_footprint();
810  Returns the maximum number of bytes obtained from the system. This
811  value will be greater than current footprint if deallocated space
812  has been reclaimed by the system. The peak number of bytes allocated
813  by malloc, realloc etc., is less than this value. Unlike mallinfo,
814  this function returns only a precomputed result, so can be called
815  frequently to monitor memory consumption. Even if locks are
816  otherwise defined, this function does not use them, so results might
817  not be up to date.
818 */
819  size_t dlmalloc_max_footprint (void);
820 
821 #if !NO_MALLINFO
822 /*
823  mallinfo()
824  Returns (by copy) a struct containing various summary statistics:
825 
826  arena: current total non-mmapped bytes allocated from system
827  ordblks: the number of free chunks
828  smblks: always zero.
829  hblks: current number of mmapped regions
830  hblkhd: total bytes held in mmapped regions
831  usmblks: the maximum total allocated space. This will be greater
832  than current total if trimming has occurred.
833  fsmblks: always zero
834  uordblks: current total allocated space (normal or mmapped)
835  fordblks: total free space
836  keepcost: the maximum number of bytes that could ideally be released
837  back to system via malloc_trim. ("ideally" means that
838  it ignores page restrictions etc.)
839 
840  Because these fields are ints, but internal bookkeeping may
841  be kept as longs, the reported values may wrap around zero and
842  thus be inaccurate.
843 */
844  struct mallinfo dlmallinfo (void);
845 #endif /* NO_MALLINFO */
846 
847 /*
848  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
849 
850  independent_calloc is similar to calloc, but instead of returning a
851  single cleared space, it returns an array of pointers to n_elements
852  independent elements that can hold contents of size elem_size, each
853  of which starts out cleared, and can be independently freed,
854  realloc'ed etc. The elements are guaranteed to be adjacently
855  allocated (this is not guaranteed to occur with multiple callocs or
856  mallocs), which may also improve cache locality in some
857  applications.
858 
859  The "chunks" argument is optional (i.e., may be null, which is
860  probably the most typical usage). If it is null, the returned array
861  is itself dynamically allocated and should also be freed when it is
862  no longer needed. Otherwise, the chunks array must be of at least
863  n_elements in length. It is filled in with the pointers to the
864  chunks.
865 
866  In either case, independent_calloc returns this pointer array, or
867  null if the allocation failed. If n_elements is zero and "chunks"
868  is null, it returns a chunk representing an array with zero elements
869  (which should be freed if not wanted).
870 
871  Each element must be individually freed when it is no longer
872  needed. If you'd like to instead be able to free all at once, you
873  should instead use regular calloc and assign pointers into this
874  space to represent elements. (In this case though, you cannot
875  independently free elements.)
876 
877  independent_calloc simplifies and speeds up implementations of many
878  kinds of pools. It may also be useful when constructing large data
879  structures that initially have a fixed number of fixed-sized nodes,
880  but the number is not known at compile time, and some of the nodes
881  may later need to be freed. For example:
882 
883  struct Node { int item; struct Node* next; };
884 
885  struct Node* build_list() {
886  struct Node** pool;
887  int n = read_number_of_nodes_needed();
888  if (n <= 0) return 0;
889  pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
890  if (pool == 0) die();
891  // organize into a linked list...
892  struct Node* first = pool[0];
893  for (i = 0; i < n-1; ++i)
894  pool[i]->next = pool[i+1];
895  free(pool); // Can now free the array (or not, if it is needed later)
896  return first;
897  }
898 */
899  void **dlindependent_calloc (size_t, size_t, void **);
900 
901 /*
902  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
903 
904  independent_comalloc allocates, all at once, a set of n_elements
905  chunks with sizes indicated in the "sizes" array. It returns
906  an array of pointers to these elements, each of which can be
907  independently freed, realloc'ed etc. The elements are guaranteed to
908  be adjacently allocated (this is not guaranteed to occur with
909  multiple callocs or mallocs), which may also improve cache locality
910  in some applications.
911 
912  The "chunks" argument is optional (i.e., may be null). If it is null
913  the returned array is itself dynamically allocated and should also
914  be freed when it is no longer needed. Otherwise, the chunks array
915  must be of at least n_elements in length. It is filled in with the
916  pointers to the chunks.
917 
918  In either case, independent_comalloc returns this pointer array, or
919  null if the allocation failed. If n_elements is zero and chunks is
920  null, it returns a chunk representing an array with zero elements
921  (which should be freed if not wanted).
922 
923  Each element must be individually freed when it is no longer
924  needed. If you'd like to instead be able to free all at once, you
925  should instead use a single regular malloc, and assign pointers at
926  particular offsets in the aggregate space. (In this case though, you
927  cannot independently free elements.)
928 
929  independent_comallac differs from independent_calloc in that each
930  element may have a different size, and also that it does not
931  automatically clear elements.
932 
933  independent_comalloc can be used to speed up allocation in cases
934  where several structs or objects must always be allocated at the
935  same time. For example:
936 
937  struct Head { ... }
938  struct Foot { ... }
939 
940  void send_message(char* msg) {
941  int msglen = strlen(msg);
942  size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
943  void* chunks[3];
944  if (independent_comalloc(3, sizes, chunks) == 0)
945  die();
946  struct Head* head = (struct Head*)(chunks[0]);
947  char* body = (char*)(chunks[1]);
948  struct Foot* foot = (struct Foot*)(chunks[2]);
949  // ...
950  }
951 
952  In general though, independent_comalloc is worth using only for
953  larger values of n_elements. For small values, you probably won't
954  detect enough difference from series of malloc calls to bother.
955 
956  Overuse of independent_comalloc can increase overall memory usage,
957  since it cannot reuse existing noncontiguous small chunks that
958  might be available for some of the elements.
959 */
960  void **dlindependent_comalloc (size_t, size_t *, void **);
961 
962 
963 /*
964  pvalloc(size_t n);
965  Equivalent to valloc(minimum-page-that-holds(n)), that is,
966  round up n to nearest pagesize.
967  */
968  void *dlpvalloc (size_t);
969 
970 /*
971  malloc_trim(size_t pad);
972 
973  If possible, gives memory back to the system (via negative arguments
974  to sbrk) if there is unused memory at the `high' end of the malloc
975  pool or in unused MMAP segments. You can call this after freeing
976  large blocks of memory to potentially reduce the system-level memory
977  requirements of a program. However, it cannot guarantee to reduce
978  memory. Under some allocation patterns, some large free blocks of
979  memory will be locked between two used chunks, so they cannot be
980  given back to the system.
981 
982  The `pad' argument to malloc_trim represents the amount of free
983  trailing space to leave untrimmed. If this argument is zero, only
984  the minimum amount of memory to maintain internal data structures
985  will be left. Non-zero arguments can be supplied to maintain enough
986  trailing space to service future expected allocations without having
987  to re-obtain memory from the system.
988 
989  Malloc_trim returns 1 if it actually released any memory, else 0.
990 */
991  int dlmalloc_trim (size_t);
992 
993 /*
994  malloc_usable_size(void* p);
995 
996  Returns the number of bytes you can actually use in
997  an allocated chunk, which may be more than you requested (although
998  often not) due to alignment and minimum size constraints.
999  You can use this many bytes without worrying about
1000  overwriting other allocated objects. This is not a particularly great
1001  programming practice. malloc_usable_size can be more useful in
1002  debugging and assertions, for example:
1003 
1004  p = malloc(n);
1005  assert(malloc_usable_size(p) >= 256);
1006 */
1007  size_t dlmalloc_usable_size (void *);
1008 
1009 /*
1010  malloc_stats();
1011  Prints on stderr the amount of space obtained from the system (both
1012  via sbrk and mmap), the maximum amount (which may be more than
1013  current if malloc_trim and/or munmap got called), and the current
1014  number of bytes allocated via malloc (or realloc, etc) but not yet
1015  freed. Note that this is the number of bytes allocated, not the
1016  number requested. It will be larger than the number requested
1017  because of alignment and bookkeeping overhead. Because it includes
1018  alignment wastage as being in use, this figure may be greater than
1019  zero even when no user-level chunks are allocated.
1020 
1021  The reported current and maximum system memory can be inaccurate if
1022  a program makes other calls to system memory allocation functions
1023  (normally sbrk) outside of malloc.
1024 
1025  malloc_stats prints only the most commonly interesting statistics.
1026  More information can be obtained by calling mallinfo.
1027 */
1028  void dlmalloc_stats (void);
1029 
1030 #endif /* ONLY_MSPACES */
1031 
1032 #if MSPACES
1033 
1034 /*
1035  mspace is an opaque type representing an independent
1036  region of space that supports mspace_malloc, etc.
1037 */
1038  typedef void *mspace;
1039 
1040 /*
1041  create_mspace creates and returns a new independent space with the
1042  given initial capacity, or, if 0, the default granularity size. It
1043  returns null if there is no system memory available to create the
1044  space. If argument locked is non-zero, the space uses a separate
1045  lock to control access. The capacity of the space will grow
1046  dynamically as needed to service mspace_malloc requests. You can
1047  control the sizes of incremental increases of this space by
1048  compiling with a different DEFAULT_GRANULARITY or dynamically
1049  setting with mallopt(M_GRANULARITY, value).
1050 */
1051  mspace create_mspace (size_t capacity, int locked);
1052 
1053 /*
1054  destroy_mspace destroys the given space, and attempts to return all
1055  of its memory back to the system, returning the total number of
1056  bytes freed. After destruction, the results of access to all memory
1057  used by the space become undefined.
1058 */
1059  size_t destroy_mspace (mspace msp);
1060 
1061 /*
1062  create_mspace_with_base uses the memory supplied as the initial base
1063  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1064  space is used for bookkeeping, so the capacity must be at least this
1065  large. (Otherwise 0 is returned.) When this initial space is
1066  exhausted, additional memory will be obtained from the system.
1067  Destroying this space will deallocate all additionally allocated
1068  space (if possible) but not the initial base.
1069 */
1070  mspace create_mspace_with_base (void *base, size_t capacity, int locked);
1071 
1072 /*
1073  mspace_malloc behaves as malloc, but operates within
1074  the given space.
1075 */
1076  void *mspace_malloc (mspace msp, size_t bytes);
1077 
1078 /*
1079  mspace_free behaves as free, but operates within
1080  the given space.
1081 
1082  If compiled with FOOTERS==1, mspace_free is not actually needed.
1083  free may be called instead of mspace_free because freed chunks from
1084  any space are handled by their originating spaces.
1085 */
1086  void mspace_free (mspace msp, void *mem);
1087 
1088 /*
1089  mspace_realloc behaves as realloc, but operates within
1090  the given space.
1091 
1092  If compiled with FOOTERS==1, mspace_realloc is not actually
1093  needed. realloc may be called instead of mspace_realloc because
1094  realloced chunks from any space are handled by their originating
1095  spaces.
1096 */
1097  void *mspace_realloc (mspace msp, void *mem, size_t newsize);
1098 
1099 /*
1100  mspace_calloc behaves as calloc, but operates within
1101  the given space.
1102 */
1103  void *mspace_calloc (mspace msp, size_t n_elements, size_t elem_size);
1104 
1105 /*
1106  mspace_memalign behaves as memalign, but operates within
1107  the given space.
1108 */
1109  void *mspace_memalign (mspace msp, size_t alignment, size_t bytes);
1110 
1111 /*
1112  mspace_independent_calloc behaves as independent_calloc, but
1113  operates within the given space.
1114 */
1115  void **mspace_independent_calloc (mspace msp, size_t n_elements, size_t elem_size, void *chunks[]);
1116 
1117 /*
1118  mspace_independent_comalloc behaves as independent_comalloc, but
1119  operates within the given space.
1120 */
1121  void **mspace_independent_comalloc (mspace msp, size_t n_elements, size_t sizes[], void *chunks[]);
1122 
1123 /*
1124  mspace_footprint() returns the number of bytes obtained from the
1125  system for this space.
1126 */
1127  size_t mspace_footprint (mspace msp);
1128 
1129 /*
1130  mspace_max_footprint() returns the peak number of bytes obtained from the
1131  system for this space.
1132 */
1133  size_t mspace_max_footprint (mspace msp);
1134 
1135 
1136 #if !NO_MALLINFO
1137 /*
1138  mspace_mallinfo behaves as mallinfo, but reports properties of
1139  the given space.
1140 */
1141  struct mallinfo mspace_mallinfo (mspace msp);
1142 #endif /* NO_MALLINFO */
1143 
1144 /*
1145  mspace_malloc_stats behaves as malloc_stats, but reports
1146  properties of the given space.
1147 */
1148  void mspace_malloc_stats (mspace msp);
1149 
1150 /*
1151  mspace_trim behaves as malloc_trim, but
1152  operates within the given space.
1153 */
1154  int mspace_trim (mspace msp, size_t pad);
1155 
1156 /*
1157  An alias for mallopt.
1158 */
1159  int mspace_mallopt (int, int);
1160 
1161 #endif /* MSPACES */
1162 
1163 #ifdef __cplusplus
1164 }; /* end of extern "C" */
1165 #endif /* __cplusplus */
1166 
1167 /*
1168  ========================================================================
1169  To make a fully customizable malloc.h header file, cut everything
1170  above this line, put into file malloc.h, edit to suit, and #include it
1171  on the next line, as well as in programs that use this malloc.
1172  ========================================================================
1173 */
1174 
1175 /* #include "malloc.h" */
1176 
1177 /*------------------------------ internal #includes ---------------------- */
1178 
1179 #ifdef WIN32
1180 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
1181 #endif /* WIN32 */
1182 
1183 #include <stdio.h> /* for printing in malloc_stats */
1184 
1185 #ifndef LACKS_ERRNO_H
1186 #include <errno.h> /* for MALLOC_FAILURE_ACTION */
1187 #endif /* LACKS_ERRNO_H */
1188 #if FOOTERS
1189 #include <time.h> /* for magic initialization */
1190 #endif /* FOOTERS */
1191 #ifndef LACKS_STDLIB_H
1192 #include <stdlib.h> /* for abort() */
1193 #endif /* LACKS_STDLIB_H */
1194 #ifdef DEBUG
1195 #if ABORT_ON_ASSERT_FAILURE
1196 #define assert(x) if(!(x)) ABORT
1197 #else /* ABORT_ON_ASSERT_FAILURE */
1198 #include <assert.h>
1199 #endif /* ABORT_ON_ASSERT_FAILURE */
1200 #else /* DEBUG */
1201 #ifdef assert
1202 #undef assert
1203 #endif
1204 #define assert(x)
1205 #endif /* DEBUG */
1206 #ifndef LACKS_STRING_H
1207 #include <string.h> /* for memset etc */
1208 #endif /* LACKS_STRING_H */
1209 #if USE_BUILTIN_FFS
1210 #ifndef LACKS_STRINGS_H
1211 #include <strings.h> /* for ffs */
1212 #endif /* LACKS_STRINGS_H */
1213 #endif /* USE_BUILTIN_FFS */
1214 #if HAVE_MMAP
1215 #ifndef LACKS_SYS_MMAN_H
1216 #include <sys/mman.h> /* for mmap */
1217 #endif /* LACKS_SYS_MMAN_H */
1218 #ifndef LACKS_FCNTL_H
1219 #include <fcntl.h>
1220 #endif /* LACKS_FCNTL_H */
1221 #endif /* HAVE_MMAP */
1222 #if HAVE_MORECORE
1223 #ifndef LACKS_UNISTD_H
1224 #include <unistd.h> /* for sbrk */
1225 #else /* LACKS_UNISTD_H */
1226 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1227 extern void *sbrk (ptrdiff_t);
1228 #endif /* FreeBSD etc */
1229 #endif /* LACKS_UNISTD_H */
1230 #endif /* HAVE_MMAP */
1231 
1232 #ifndef WIN32
1233 #ifndef malloc_getpagesize
1234 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
1235 # ifndef _SC_PAGE_SIZE
1236 # define _SC_PAGE_SIZE _SC_PAGESIZE
1237 # endif
1238 # endif
1239 # ifdef _SC_PAGE_SIZE
1240 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1241 # else
1242 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1243 extern size_t getpagesize ();
1244 # define malloc_getpagesize getpagesize()
1245 # else
1246 # ifdef WIN32 /* use supplied emulation of getpagesize */
1247 # define malloc_getpagesize getpagesize()
1248 # else
1249 # ifndef LACKS_SYS_PARAM_H
1250 # include <sys/param.h>
1251 # endif
1252 # ifdef EXEC_PAGESIZE
1253 # define malloc_getpagesize EXEC_PAGESIZE
1254 # else
1255 # ifdef NBPG
1256 # ifndef CLSIZE
1257 # define malloc_getpagesize NBPG
1258 # else
1259 # define malloc_getpagesize (NBPG * CLSIZE)
1260 # endif
1261 # else
1262 # ifdef NBPC
1263 # define malloc_getpagesize NBPC
1264 # else
1265 # ifdef PAGESIZE
1266 # define malloc_getpagesize PAGESIZE
1267 # else /* just guess */
1268 # define malloc_getpagesize ((size_t)4096U)
1269 # endif
1270 # endif
1271 # endif
1272 # endif
1273 # endif
1274 # endif
1275 # endif
1276 #endif
1277 #endif
1278 
1279 /* ------------------- size_t and alignment properties -------------------- */
1280 
1281 /* The byte and bit size of a size_t */
1282 #define SIZE_T_SIZE (sizeof(size_t))
1283 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
1284 
1285 /* Some constants coerced to size_t */
1286 /* Annoying but necessary to avoid errors on some plaftorms */
1287 #define SIZE_T_ZERO ((size_t)0)
1288 #define SIZE_T_ONE ((size_t)1)
1289 #define SIZE_T_TWO ((size_t)2)
1290 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
1291 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
1292 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1293 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
1294 
1295 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
1296 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
1297 
1298 /* True if address a has acceptable alignment */
1299 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1300 
1301 /* the number of bytes to offset an address to align it */
1302 #define align_offset(A)\
1303  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1304  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
1305 
1306 /* -------------------------- MMAP preliminaries ------------------------- */
1307 
1308 /*
1309  If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
1310  checks to fail so compiler optimizer can delete code rather than
1311  using so many "#if"s.
1312 */
1313 
1314 
1315 /* MORECORE and MMAP must return MFAIL on failure */
1316 #define MFAIL ((void*)(MAX_SIZE_T))
1317 #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
1318 
1319 #if !HAVE_MMAP
1320 #define IS_MMAPPED_BIT (SIZE_T_ZERO)
1321 #define USE_MMAP_BIT (SIZE_T_ZERO)
1322 #define CALL_MMAP(s) MFAIL
1323 #define CALL_MUNMAP(a, s) (-1)
1324 #define DIRECT_MMAP(s) MFAIL
1325 
1326 #else /* HAVE_MMAP */
1327 #define IS_MMAPPED_BIT (SIZE_T_ONE)
1328 #define USE_MMAP_BIT (SIZE_T_ONE)
1329 
1330 #ifndef USE_MALLOC_INSTEAD
1331 #define USE_MALLOC_INSTEAD 0
1332 #endif
1333 
1334 #if USE_MALLOC_INSTEAD
1335 #define CALL_MMAP(s) system_malloc(s)
1336 #define CALL_MUNMAP(a,s) system_free(a)
1337 #define DIRECT_MMAP(s) system_malloc(s)
1338 #else /* USE_MALLOC_INSTEAD */
1339 #ifndef WIN32
1340 #define CALL_MUNMAP(a, s) munmap((a), (s))
1341 #define MMAP_PROT (PROT_READ|PROT_WRITE)
1342 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1343 #define MAP_ANONYMOUS MAP_ANON
1344 #endif /* MAP_ANON */
1345 #ifdef MAP_ANONYMOUS
1346 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
1347 #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1348 #else /* MAP_ANONYMOUS */
1349 /*
1350  Nearly all versions of mmap support MAP_ANONYMOUS, so the following
1351  is unlikely to be needed, but is supplied just in case.
1352 */
1353 #define MMAP_FLAGS (MAP_PRIVATE)
1354 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1355 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
1356  (dev_zero_fd = open("/dev/zero", O_RDWR), \
1357  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1358  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1359 #endif /* MAP_ANONYMOUS */
1360 
1361 #define DIRECT_MMAP(s) CALL_MMAP(s)
1362 #else /* WIN32 */
1363 
1364 /* Win32 MMAP via VirtualAlloc */
1365 static void *
1366 win32mmap (size_t size)
1367 {
1368  void *ptr = VirtualAlloc (0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1369  return (ptr != 0) ? ptr : MFAIL;
1370 }
1371 
1372 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1373 static void *
1374 win32direct_mmap (size_t size)
1375 {
1376  void *ptr = VirtualAlloc (0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
1377  PAGE_READWRITE);
1378  return (ptr != 0) ? ptr : MFAIL;
1379 }
1380 
1381 /* This function supports releasing coalesed segments */
1382 static int
1383 win32munmap (void *ptr, size_t size)
1384 {
1385  MEMORY_BASIC_INFORMATION minfo;
1386  char *cptr = ptr;
1387  while (size)
1388  {
1389  if (VirtualQuery (cptr, &minfo, sizeof (minfo)) == 0)
1390  return -1;
1391  if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1392  minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1393  return -1;
1394  if (VirtualFree (cptr, 0, MEM_RELEASE) == 0)
1395  return -1;
1396  cptr += minfo.RegionSize;
1397  size -= minfo.RegionSize;
1398  }
1399  return 0;
1400 }
1401 
1402 #define CALL_MMAP(s) win32mmap(s)
1403 #define CALL_MUNMAP(a, s) win32munmap((a), (s))
1404 #define DIRECT_MMAP(s) win32direct_mmap(s)
1405 #endif /* WIN32 */
1406 #endif /* USE_MALLOC_INSTEAD */
1407 #endif /* HAVE_MMAP */
1408 
1409 #if HAVE_MMAP && HAVE_MREMAP
1410 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1411 #else /* HAVE_MMAP && HAVE_MREMAP */
1412 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1413 #endif /* HAVE_MMAP && HAVE_MREMAP */
1414 
1415 #if HAVE_MORECORE
1416 #define CALL_MORECORE(S) MORECORE(S)
1417 #else /* HAVE_MORECORE */
1418 #define CALL_MORECORE(S) MFAIL
1419 #endif /* HAVE_MORECORE */
1420 
1421 /* mstate bit set if continguous morecore disabled or failed */
1422 #define USE_NONCONTIGUOUS_BIT (4U)
1423 
1424 /* segment bit set in create_mspace_with_base */
1425 #define EXTERN_BIT (8U)
1426 
1427 
1428 /* --------------------------- Lock preliminaries ------------------------ */
1429 
1430 #if USE_LOCKS
1431 
1432 /*
1433  When locks are defined, there are up to two global locks:
1434 
1435  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
1436  MORECORE. In many cases sys_alloc requires two calls, that should
1437  not be interleaved with calls by other threads. This does not
1438  protect against direct calls to MORECORE by other threads not
1439  using this lock, so there is still code to cope the best we can on
1440  interference.
1441 
1442  * magic_init_mutex ensures that mparams.magic and other
1443  unique mparams values are initialized only once.
1444 */
1445 
1446 #ifndef WIN32
1447 /* By default use posix locks */
1448 #include <pthread.h>
1449 #define MLOCK_T pthread_mutex_t
1450 #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
1451 #define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
1452 #define RELEASE_LOCK(l) pthread_mutex_unlock(l)
1453 
1454 #if HAVE_MORECORE
1455 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
1456 #endif /* HAVE_MORECORE */
1457 
1458 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
1459 
1460 #else /* WIN32 */
1461 /*
1462  Because lock-protected regions have bounded times, and there
1463  are no recursive lock calls, we can use simple spinlocks.
1464 */
1465 
1466 #define MLOCK_T long
1467 static int
1468 win32_acquire_lock (MLOCK_T * sl)
1469 {
1470  for (;;)
1471  {
1472 #ifdef InterlockedCompareExchangePointer
1473  if (!InterlockedCompareExchange (sl, 1, 0))
1474  return 0;
1475 #else /* Use older void* version */
1476  if (!InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0))
1477  return 0;
1478 #endif /* InterlockedCompareExchangePointer */
1479  Sleep (0);
1480  }
1481 }
1482 
1483 static void
1484 win32_release_lock (MLOCK_T * sl)
1485 {
1486  InterlockedExchange (sl, 0);
1487 }
1488 
1489 #define INITIAL_LOCK(l) *(l)=0
1490 #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
1491 #define RELEASE_LOCK(l) win32_release_lock(l)
1492 #if HAVE_MORECORE
1493 static MLOCK_T morecore_mutex;
1494 #endif /* HAVE_MORECORE */
1495 static MLOCK_T magic_init_mutex;
1496 #endif /* WIN32 */
1497 
1498 #define USE_LOCK_BIT (2U)
1499 #else /* USE_LOCKS */
1500 #define USE_LOCK_BIT (0U)
1501 #define INITIAL_LOCK(l)
1502 #endif /* USE_LOCKS */
1503 
1504 #if USE_LOCKS && HAVE_MORECORE
1505 #define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
1506 #define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
1507 #else /* USE_LOCKS && HAVE_MORECORE */
1508 #define ACQUIRE_MORECORE_LOCK()
1509 #define RELEASE_MORECORE_LOCK()
1510 #endif /* USE_LOCKS && HAVE_MORECORE */
1511 
1512 #if USE_LOCKS
1513 #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
1514 #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
1515 #else /* USE_LOCKS */
1516 #define ACQUIRE_MAGIC_INIT_LOCK()
1517 #define RELEASE_MAGIC_INIT_LOCK()
1518 #endif /* USE_LOCKS */
1519 
1520 
1521 /* ----------------------- Chunk representations ------------------------ */
1522 
1523 /*
1524  (The following includes lightly edited explanations by Colin Plumb.)
1525 
1526  The malloc_chunk declaration below is misleading (but accurate and
1527  necessary). It declares a "view" into memory allowing access to
1528  necessary fields at known offsets from a given base.
1529 
1530  Chunks of memory are maintained using a `boundary tag' method as
1531  originally described by Knuth. (See the paper by Paul Wilson
1532  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
1533  techniques.) Sizes of free chunks are stored both in the front of
1534  each chunk and at the end. This makes consolidating fragmented
1535  chunks into bigger chunks fast. The head fields also hold bits
1536  representing whether chunks are free or in use.
1537 
1538  Here are some pictures to make it clearer. They are "exploded" to
1539  show that the state of a chunk can be thought of as extending from
1540  the high 31 bits of the head field of its header through the
1541  prev_foot and PINUSE_BIT bit of the following chunk header.
1542 
1543  A chunk that's in use looks like:
1544 
1545  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1546  | Size of previous chunk (if P = 1) |
1547  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1548  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1549  | Size of this chunk 1| +-+
1550  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1551  | |
1552  +- -+
1553  | |
1554  +- -+
1555  | :
1556  +- size - sizeof(size_t) available payload bytes -+
1557  : |
1558  chunk-> +- -+
1559  | |
1560  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1561  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
1562  | Size of next chunk (may or may not be in use) | +-+
1563  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1564 
1565  And if it's free, it looks like this:
1566 
1567  chunk-> +- -+
1568  | User payload (must be in use, or we would have merged!) |
1569  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1570  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1571  | Size of this chunk 0| +-+
1572  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1573  | Next pointer |
1574  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1575  | Prev pointer |
1576  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1577  | :
1578  +- size - sizeof(struct chunk) unused bytes -+
1579  : |
1580  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1581  | Size of this chunk |
1582  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1583  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
1584  | Size of next chunk (must be in use, or we would have merged)| +-+
1585  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1586  | :
1587  +- User payload -+
1588  : |
1589  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1590  |0|
1591  +-+
1592  Note that since we always merge adjacent free chunks, the chunks
1593  adjacent to a free chunk must be in use.
1594 
1595  Given a pointer to a chunk (which can be derived trivially from the
1596  payload pointer) we can, in O(1) time, find out whether the adjacent
1597  chunks are free, and if so, unlink them from the lists that they
1598  are on and merge them with the current chunk.
1599 
1600  Chunks always begin on even word boundaries, so the mem portion
1601  (which is returned to the user) is also on an even word boundary, and
1602  thus at least double-word aligned.
1603 
1604  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
1605  chunk size (which is always a multiple of two words), is an in-use
1606  bit for the *previous* chunk. If that bit is *clear*, then the
1607  word before the current chunk size contains the previous chunk
1608  size, and can be used to find the front of the previous chunk.
1609  The very first chunk allocated always has this bit set, preventing
1610  access to non-existent (or non-owned) memory. If pinuse is set for
1611  any given chunk, then you CANNOT determine the size of the
1612  previous chunk, and might even get a memory addressing fault when
1613  trying to do so.
1614 
1615  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
1616  the chunk size redundantly records whether the current chunk is
1617  inuse. This redundancy enables usage checks within free and realloc,
1618  and reduces indirection when freeing and consolidating chunks.
1619 
1620  Each freshly allocated chunk must have both cinuse and pinuse set.
1621  That is, each allocated chunk borders either a previously allocated
1622  and still in-use chunk, or the base of its memory arena. This is
1623  ensured by making all allocations from the the `lowest' part of any
1624  found chunk. Further, no free chunk physically borders another one,
1625  so each free chunk is known to be preceded and followed by either
1626  inuse chunks or the ends of memory.
1627 
1628  Note that the `foot' of the current chunk is actually represented
1629  as the prev_foot of the NEXT chunk. This makes it easier to
1630  deal with alignments etc but can be very confusing when trying
1631  to extend or adapt this code.
1632 
1633  The exceptions to all this are
1634 
1635  1. The special chunk `top' is the top-most available chunk (i.e.,
1636  the one bordering the end of available memory). It is treated
1637  specially. Top is never included in any bin, is used only if
1638  no other chunk is available, and is released back to the
1639  system if it is very large (see M_TRIM_THRESHOLD). In effect,
1640  the top chunk is treated as larger (and thus less well
1641  fitting) than any other available chunk. The top chunk
1642  doesn't update its trailing size field since there is no next
1643  contiguous chunk that would have to index off it. However,
1644  space is still allocated for it (TOP_FOOT_SIZE) to enable
1645  separation or merging when space is extended.
1646 
1647  3. Chunks allocated via mmap, which have the lowest-order bit
1648  (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
1649  PINUSE_BIT in their head fields. Because they are allocated
1650  one-by-one, each must carry its own prev_foot field, which is
1651  also used to hold the offset this chunk has within its mmapped
1652  region, which is needed to preserve alignment. Each mmapped
1653  chunk is trailed by the first two fields of a fake next-chunk
1654  for sake of usage checks.
1655 
1656 */
1657 
1659 {
1660  size_t prev_foot; /* Size of previous chunk (if free). */
1661  size_t head; /* Size and inuse bits. */
1662  struct malloc_chunk *fd; /* double links -- used only if free. */
1663  struct malloc_chunk *bk;
1664 };
1665 
1666 typedef struct malloc_chunk mchunk;
1667 typedef struct malloc_chunk *mchunkptr;
1668 typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
1669 typedef unsigned int bindex_t; /* Described below */
1670 typedef unsigned int binmap_t; /* Described below */
1671 typedef unsigned int flag_t; /* The type of various bit flag sets */
1672 
1673 /* ------------------- Chunks sizes and alignments ----------------------- */
1674 
1675 #define MCHUNK_SIZE (sizeof(mchunk))
1676 
1677 #if FOOTERS
1678 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1679 #else /* FOOTERS */
1680 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
1681 #endif /* FOOTERS */
1682 
1683 /* MMapped chunks need a second word of overhead ... */
1684 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1685 /* ... and additional padding for fake next-chunk at foot */
1686 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
1687 
1688 /* The smallest size we can malloc is an aligned minimal chunk */
1689 #define MIN_CHUNK_SIZE\
1690  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1691 
1692 /* conversion from malloc headers to user pointers, and back */
1693 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
1694 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
1695 /* chunk associated with aligned address A */
1696 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
1697 
1698 /* Bounds on request (not chunk) sizes. */
1699 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
1700 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
1701 
1702 /* pad request bytes into a usable size */
1703 #define pad_request(req) \
1704  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1705 
1706 /* pad request, checking for minimum (but not maximum) */
1707 #define request2size(req) \
1708  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
1709 
1710 
1711 /* ------------------ Operations on head and foot fields ----------------- */
1712 
1713 /*
1714  The head field of a chunk is or'ed with PINUSE_BIT when previous
1715  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
1716  use. If the chunk was obtained with mmap, the prev_foot field has
1717  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
1718  mmapped region to the base of the chunk.
1719 */
1720 
1721 #define PINUSE_BIT (SIZE_T_ONE)
1722 #define CINUSE_BIT (SIZE_T_TWO)
1723 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
1724 
1725 /* Head value for fenceposts */
1726 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
1727 
1728 /* extraction of fields from head words */
1729 #define cinuse(p) ((p)->head & CINUSE_BIT)
1730 #define pinuse(p) ((p)->head & PINUSE_BIT)
1731 #define chunksize(p) ((p)->head & ~(INUSE_BITS))
1732 
1733 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
1734 #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
1735 
1736 /* Treat space at ptr +/- offset as a chunk */
1737 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1738 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
1739 
1740 /* Ptr to next or previous physical malloc_chunk. */
1741 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
1742 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
1743 
1744 /* extract next chunk's pinuse bit */
1745 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
1746 
1747 /* Get/set size at footer */
1748 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
1749 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
1750 
1751 /* Set size, pinuse bit, and foot */
1752 #define set_size_and_pinuse_of_free_chunk(p, s)\
1753  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
1754 
1755 /* Set size, pinuse bit, foot, and clear next pinuse */
1756 #define set_free_with_pinuse(p, s, n)\
1757  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
1758 
1759 #define is_mmapped(p)\
1760  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
1761 
1762 /* Get the internal overhead associated with chunk p */
1763 #define overhead_for(p)\
1764  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
1765 
1766 /* Return true if malloced space is not necessarily cleared */
1767 #if MMAP_CLEARS
1768 #define calloc_must_clear(p) (!is_mmapped(p))
1769 #else /* MMAP_CLEARS */
1770 #define calloc_must_clear(p) (1)
1771 #endif /* MMAP_CLEARS */
1772 
1773 /* ---------------------- Overlaid data structures ----------------------- */
1774 
1775 /*
1776  When chunks are not in use, they are treated as nodes of either
1777  lists or trees.
1778 
1779  "Small" chunks are stored in circular doubly-linked lists, and look
1780  like this:
1781 
1782  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1783  | Size of previous chunk |
1784  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1785  `head:' | Size of chunk, in bytes |P|
1786  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1787  | Forward pointer to next chunk in list |
1788  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1789  | Back pointer to previous chunk in list |
1790  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1791  | Unused space (may be 0 bytes long) .
1792  . .
1793  . |
1794 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1795  `foot:' | Size of chunk, in bytes |
1796  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1797 
1798  Larger chunks are kept in a form of bitwise digital trees (aka
1799  tries) keyed on chunksizes. Because malloc_tree_chunks are only for
1800  free chunks greater than 256 bytes, their size doesn't impose any
1801  constraints on user chunk sizes. Each node looks like:
1802 
1803  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1804  | Size of previous chunk |
1805  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1806  `head:' | Size of chunk, in bytes |P|
1807  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1808  | Forward pointer to next chunk of same size |
1809  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1810  | Back pointer to previous chunk of same size |
1811  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1812  | Pointer to left child (child[0]) |
1813  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1814  | Pointer to right child (child[1]) |
1815  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1816  | Pointer to parent |
1817  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1818  | bin index of this chunk |
1819  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1820  | Unused space .
1821  . |
1822 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1823  `foot:' | Size of chunk, in bytes |
1824  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1825 
1826  Each tree holding treenodes is a tree of unique chunk sizes. Chunks
1827  of the same size are arranged in a circularly-linked list, with only
1828  the oldest chunk (the next to be used, in our FIFO ordering)
1829  actually in the tree. (Tree members are distinguished by a non-null
1830  parent pointer.) If a chunk with the same size an an existing node
1831  is inserted, it is linked off the existing node using pointers that
1832  work in the same way as fd/bk pointers of small chunks.
1833 
1834  Each tree contains a power of 2 sized range of chunk sizes (the
1835  smallest is 0x100 <= x < 0x180), which is is divided in half at each
1836  tree level, with the chunks in the smaller half of the range (0x100
1837  <= x < 0x140 for the top nose) in the left subtree and the larger
1838  half (0x140 <= x < 0x180) in the right subtree. This is, of course,
1839  done by inspecting individual bits.
1840 
1841  Using these rules, each node's left subtree contains all smaller
1842  sizes than its right subtree. However, the node at the root of each
1843  subtree has no particular ordering relationship to either. (The
1844  dividing line between the subtree sizes is based on trie relation.)
1845  If we remove the last chunk of a given size from the interior of the
1846  tree, we need to replace it with a leaf node. The tree ordering
1847  rules permit a node to be replaced by any leaf below it.
1848 
1849  The smallest chunk in a tree (a common operation in a best-fit
1850  allocator) can be found by walking a path to the leftmost leaf in
1851  the tree. Unlike a usual binary tree, where we follow left child
1852  pointers until we reach a null, here we follow the right child
1853  pointer any time the left one is null, until we reach a leaf with
1854  both child pointers null. The smallest chunk in the tree will be
1855  somewhere along that path.
1856 
1857  The worst case number of steps to add, find, or remove a node is
1858  bounded by the number of bits differentiating chunks within
1859  bins. Under current bin calculations, this ranges from 6 up to 21
1860  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
1861  is of course much better.
1862 */
1863 
1865 {
1866  /* The first four fields must be compatible with malloc_chunk */
1867  size_t prev_foot;
1868  size_t head;
1871 
1875 };
1876 
1877 typedef struct malloc_tree_chunk tchunk;
1879 typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
1880 
1881 /* A little helper macro for trees */
1882 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
1883 
1884 /* ----------------------------- Segments -------------------------------- */
1885 
1886 /*
1887  Each malloc space may include non-contiguous segments, held in a
1888  list headed by an embedded malloc_segment record representing the
1889  top-most space. Segments also include flags holding properties of
1890  the space. Large chunks that are directly allocated by mmap are not
1891  included in this list. They are instead independently created and
1892  destroyed without otherwise keeping track of them.
1893 
1894  Segment management mainly comes into play for spaces allocated by
1895  MMAP. Any call to MMAP might or might not return memory that is
1896  adjacent to an existing segment. MORECORE normally contiguously
1897  extends the current space, so this space is almost always adjacent,
1898  which is simpler and faster to deal with. (This is why MORECORE is
1899  used preferentially to MMAP when both are available -- see
1900  sys_alloc.) When allocating using MMAP, we don't use any of the
1901  hinting mechanisms (inconsistently) supported in various
1902  implementations of unix mmap, or distinguish reserving from
1903  committing memory. Instead, we just ask for space, and exploit
1904  contiguity when we get it. It is probably possible to do
1905  better than this on some systems, but no general scheme seems
1906  to be significantly better.
1907 
1908  Management entails a simpler variant of the consolidation scheme
1909  used for chunks to reduce fragmentation -- new adjacent memory is
1910  normally prepended or appended to an existing segment. However,
1911  there are limitations compared to chunk consolidation that mostly
1912  reflect the fact that segment processing is relatively infrequent
1913  (occurring only when getting memory from system) and that we
1914  don't expect to have huge numbers of segments:
1915 
1916  * Segments are not indexed, so traversal requires linear scans. (It
1917  would be possible to index these, but is not worth the extra
1918  overhead and complexity for most programs on most platforms.)
1919  * New segments are only appended to old ones when holding top-most
1920  memory; if they cannot be prepended to others, they are held in
1921  different segments.
1922 
1923  Except for the top-most segment of an mstate, each segment record
1924  is kept at the tail of its segment. Segments are added by pushing
1925  segment records onto the list headed by &mstate.seg for the
1926  containing mstate.
1927 
1928  Segment flags control allocation/merge/deallocation policies:
1929  * If EXTERN_BIT set, then we did not allocate this segment,
1930  and so should not try to deallocate or merge with others.
1931  (This currently holds only for the initial segment passed
1932  into create_mspace_with_base.)
1933  * If IS_MMAPPED_BIT set, the segment may be merged with
1934  other surrounding mmapped segments and trimmed/de-allocated
1935  using munmap.
1936  * If neither bit is set, then the segment was obtained using
1937  MORECORE so can be merged with surrounding MORECORE'd segments
1938  and deallocated/trimmed using MORECORE with negative arguments.
1939 */
1940 
1942 {
1943  char *base; /* base address */
1944  size_t size; /* allocated size */
1945  struct malloc_segment *next; /* ptr to next segment */
1946  flag_t sflags; /* mmap and extern flag */
1947 };
1948 
1949 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
1950 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1951 
1952 typedef struct malloc_segment msegment;
1953 typedef struct malloc_segment *msegmentptr;
1954 
1955 /* ---------------------------- malloc_state ----------------------------- */
1956 
1957 /*
1958  A malloc_state holds all of the bookkeeping for a space.
1959  The main fields are:
1960 
1961  Top
1962  The topmost chunk of the currently active segment. Its size is
1963  cached in topsize. The actual size of topmost space is
1964  topsize+TOP_FOOT_SIZE, which includes space reserved for adding
1965  fenceposts and segment records if necessary when getting more
1966  space from the system. The size at which to autotrim top is
1967  cached from mparams in trim_check, except that it is disabled if
1968  an autotrim fails.
1969 
1970  Designated victim (dv)
1971  This is the preferred chunk for servicing small requests that
1972  don't have exact fits. It is normally the chunk split off most
1973  recently to service another small request. Its size is cached in
1974  dvsize. The link fields of this chunk are not maintained since it
1975  is not kept in a bin.
1976 
1977  SmallBins
1978  An array of bin headers for free chunks. These bins hold chunks
1979  with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
1980  chunks of all the same size, spaced 8 bytes apart. To simplify
1981  use in double-linked lists, each bin header acts as a malloc_chunk
1982  pointing to the real first node, if it exists (else pointing to
1983  itself). This avoids special-casing for headers. But to avoid
1984  waste, we allocate only the fd/bk pointers of bins, and then use
1985  repositioning tricks to treat these as the fields of a chunk.
1986 
1987  TreeBins
1988  Treebins are pointers to the roots of trees holding a range of
1989  sizes. There are 2 equally spaced treebins for each power of two
1990  from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
1991  larger.
1992 
1993  Bin maps
1994  There is one bit map for small bins ("smallmap") and one for
1995  treebins ("treemap). Each bin sets its bit when non-empty, and
1996  clears the bit when empty. Bit operations are then used to avoid
1997  bin-by-bin searching -- nearly all "search" is done without ever
1998  looking at bins that won't be selected. The bit maps
1999  conservatively use 32 bits per map word, even if on 64bit system.
2000  For a good description of some of the bit-based techniques used
2001  here, see Henry S. Warren Jr's book "Hacker's Delight" (and
2002  supplement at http://hackersdelight.org/). Many of these are
2003  intended to reduce the branchiness of paths through malloc etc, as
2004  well as to reduce the number of memory locations read or written.
2005 
2006  Segments
2007  A list of segments headed by an embedded malloc_segment record
2008  representing the initial space.
2009 
2010  Address check support
2011  The least_addr field is the least address ever obtained from
2012  MORECORE or MMAP. Attempted frees and reallocs of any address less
2013  than this are trapped (unless INSECURE is defined).
2014 
2015  Magic tag
2016  A cross-check field that should always hold same value as mparams.magic.
2017 
2018  Flags
2019  Bits recording whether to use MMAP, locks, or contiguous MORECORE
2020 
2021  Statistics
2022  Each space keeps track of current and maximum system memory
2023  obtained via MORECORE or MMAP.
2024 
2025  Locking
2026  If USE_LOCKS is defined, the "mutex" lock is acquired and released
2027  around every public call using this mspace.
2028 */
2029 
2030 /* Bin types, widths and sizes */
2031 #define NSMALLBINS (32U)
2032 #define NTREEBINS (32U)
2033 #define SMALLBIN_SHIFT (3U)
2034 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2035 #define TREEBIN_SHIFT (8U)
2036 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2037 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2038 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2039 
2041 {
2044  size_t dvsize;
2045  size_t topsize;
2046  char *least_addr;
2047  mchunkptr dv;
2048  mchunkptr top;
2049  size_t trim_check;
2050  size_t magic;
2051  mchunkptr smallbins[(NSMALLBINS + 1) * 2];
2052  tbinptr treebins[NTREEBINS];
2053  size_t footprint;
2056 #if USE_LOCKS
2057  MLOCK_T mutex; /* locate lock among fields that rarely change */
2058 #endif /* USE_LOCKS */
2060 };
2061 
2062 typedef struct malloc_state *mstate;
2063 
2064 /* ------------- Global malloc_state and malloc_params ------------------- */
2065 
2066 /*
2067  malloc_params holds global properties, including those that can be
2068  dynamically set using mallopt. There is a single instance, mparams,
2069  initialized in init_mparams.
2070 */
2071 
2073 {
2074  size_t magic;
2075  size_t page_size;
2076  size_t granularity;
2080 };
2081 
2082 static struct malloc_params mparams;
2083 
2084 /* The global malloc_state used for all non-"mspace" calls */
2085 static struct malloc_state _gm_;
2086 #define gm (&_gm_)
2087 #define is_global(M) ((M) == &_gm_)
2088 #define is_initialized(M) ((M)->top != 0)
2089 
2090 /* -------------------------- system alloc setup ------------------------- */
2091 
2092 /* Operations on mflags */
2093 
2094 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2095 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2096 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2097 
2098 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2099 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2100 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2101 
2102 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2103 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2104 
2105 #define set_lock(M,L)\
2106  ((M)->mflags = (L)?\
2107  ((M)->mflags | USE_LOCK_BIT) :\
2108  ((M)->mflags & ~USE_LOCK_BIT))
2109 
2110 /* page-align a size */
2111 #define page_align(S)\
2112  (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
2113 
2114 /* granularity-align a size */
2115 #define granularity_align(S)\
2116  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
2117 
2118 #define is_page_aligned(S)\
2119  (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2120 #define is_granularity_aligned(S)\
2121  (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2122 
2123 /* True if segment S holds address A */
2124 #define segment_holds(S, A)\
2125  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2126 
2127 /* Return segment holding given address */
2128 static msegmentptr
2129 segment_holding (mstate m, char *addr)
2130 {
2131  msegmentptr sp = &m->seg;
2132  for (;;)
2133  {
2134  if (addr >= sp->base && addr < sp->base + sp->size)
2135  return sp;
2136  if ((sp = sp->next) == 0)
2137  return 0;
2138  }
2139 }
2140 
2141 /* Return true if segment contains a segment link */
2142 static int
2143 has_segment_link (mstate m, msegmentptr ss)
2144 {
2145  msegmentptr sp = &m->seg;
2146  for (;;)
2147  {
2148  if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
2149  return 1;
2150  if ((sp = sp->next) == 0)
2151  return 0;
2152  }
2153 }
2154 
2155 #ifndef MORECORE_CANNOT_TRIM
2156 #define should_trim(M,s) ((s) > (M)->trim_check)
2157 #else /* MORECORE_CANNOT_TRIM */
2158 #define should_trim(M,s) (0)
2159 #endif /* MORECORE_CANNOT_TRIM */
2160 
2161 /*
2162  TOP_FOOT_SIZE is padding at the end of a segment, including space
2163  that may be needed to place segment records and fenceposts when new
2164  noncontiguous segments are added.
2165 */
2166 #define TOP_FOOT_SIZE\
2167  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2168 
2169 
2170 /* ------------------------------- Hooks -------------------------------- */
2171 
2172 /*
2173  PREACTION should be defined to return 0 on success, and nonzero on
2174  failure. If you are not using locking, you can redefine these to do
2175  anything you like.
2176 */
2177 
2178 #if USE_LOCKS
2179 
2180 /* Ensure locks are initialized */
2181 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
2182 
2183 #define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2184 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2185 #else /* USE_LOCKS */
2186 
2187 #ifndef PREACTION
2188 #define PREACTION(M) (0)
2189 #endif /* PREACTION */
2190 
2191 #ifndef POSTACTION
2192 #define POSTACTION(M)
2193 #endif /* POSTACTION */
2194 
2195 #endif /* USE_LOCKS */
2196 
2197 /*
2198  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2199  USAGE_ERROR_ACTION is triggered on detected bad frees and
2200  reallocs. The argument p is an address that might have triggered the
2201  fault. It is ignored by the two predefined actions, but might be
2202  useful in custom actions that try to help diagnose errors.
2203 */
2204 
2205 #if PROCEED_ON_ERROR
2206 
2207 /* A count of the number of corruption errors causing resets */
2208 int malloc_corruption_error_count;
2209 
2210 /* default corruption action */
2211 static void reset_on_error (mstate m);
2212 
2213 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2214 #define USAGE_ERROR_ACTION(m, p)
2215 
2216 #else /* PROCEED_ON_ERROR */
2217 
2218 #ifndef CORRUPTION_ERROR_ACTION
2219 #define CORRUPTION_ERROR_ACTION(m) ABORT
2220 #endif /* CORRUPTION_ERROR_ACTION */
2221 
2222 #ifndef USAGE_ERROR_ACTION
2223 #define USAGE_ERROR_ACTION(m,p) ABORT
2224 #endif /* USAGE_ERROR_ACTION */
2225 
2226 #endif /* PROCEED_ON_ERROR */
2227 
2228 /* -------------------------- Debugging setup ---------------------------- */
2229 
2230 #if ! DEBUG
2231 
2232 #define check_free_chunk(M,P)
2233 #define check_inuse_chunk(M,P)
2234 #define check_malloced_chunk(M,P,N)
2235 #define check_mmapped_chunk(M,P)
2236 #define check_malloc_state(M)
2237 #define check_top_chunk(M,P)
2238 
2239 #else /* DEBUG */
2240 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
2241 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2242 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
2243 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2244 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2245 #define check_malloc_state(M) do_check_malloc_state(M)
2246 
2247 static void do_check_any_chunk (mstate m, mchunkptr p);
2248 static void do_check_top_chunk (mstate m, mchunkptr p);
2249 static void do_check_mmapped_chunk (mstate m, mchunkptr p);
2250 static void do_check_inuse_chunk (mstate m, mchunkptr p);
2251 static void do_check_free_chunk (mstate m, mchunkptr p);
2252 static void do_check_malloced_chunk (mstate m, void *mem, size_t s);
2253 static void do_check_tree (mstate m, tchunkptr t);
2254 static void do_check_treebin (mstate m, bindex_t i);
2255 static void do_check_smallbin (mstate m, bindex_t i);
2256 static void do_check_malloc_state (mstate m);
2257 static int bin_find (mstate m, mchunkptr x);
2258 static size_t traverse_and_check (mstate m);
2259 #endif /* DEBUG */
2260 
2261 /* ---------------------------- Indexing Bins ---------------------------- */
2262 
2263 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2264 #define small_index(s) ((s) >> SMALLBIN_SHIFT)
2265 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2266 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
2267 
2268 /* addressing by index. See above about smallbin repositioning */
2269 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2270 #define treebin_at(M,i) (&((M)->treebins[i]))
2271 
2272 /* assign tree index for size S to variable I */
2273 #if defined(__GNUC__) && defined(i386)
2274 #define compute_tree_index(S, I)\
2275 {\
2276  size_t X = S >> TREEBIN_SHIFT;\
2277  if (X == 0)\
2278  I = 0;\
2279  else if (X > 0xFFFF)\
2280  I = NTREEBINS-1;\
2281  else {\
2282  unsigned int K;\
2283  __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
2284  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2285  }\
2286 }
2287 #else /* GNUC */
2288 #define compute_tree_index(S, I)\
2289 {\
2290  size_t X = S >> TREEBIN_SHIFT;\
2291  if (X == 0)\
2292  I = 0;\
2293  else if (X > 0xFFFF)\
2294  I = NTREEBINS-1;\
2295  else {\
2296  unsigned int Y = (unsigned int)X;\
2297  unsigned int N = ((Y - 0x100) >> 16) & 8;\
2298  unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2299  N += K;\
2300  N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2301  K = 14 - N + ((Y <<= K) >> 15);\
2302  I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2303  }\
2304 }
2305 #endif /* GNUC */
2306 
2307 /* Bit representing maximum resolved size in a treebin at i */
2308 #define bit_for_tree_index(i) \
2309  (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2310 
2311 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
2312 #define leftshift_for_tree_index(i) \
2313  ((i == NTREEBINS-1)? 0 : \
2314  ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2315 
2316 /* The size of the smallest chunk held in bin with index i */
2317 #define minsize_for_tree_index(i) \
2318  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2319  (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
2320 
2321 
2322 /* ------------------------ Operations on bin maps ----------------------- */
2323 
2324 /* bit corresponding to given index */
2325 #define idx2bit(i) ((binmap_t)(1) << (i))
2326 
2327 /* Mark/Clear bits with given index */
2328 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2329 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2330 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2331 
2332 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2333 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2334 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
2335 
2336 /* index corresponding to given bit */
2337 
2338 #if defined(__GNUC__) && defined(i386)
2339 #define compute_bit2idx(X, I)\
2340 {\
2341  unsigned int J;\
2342  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
2343  I = (bindex_t)J;\
2344 }
2345 
2346 #else /* GNUC */
2347 #if USE_BUILTIN_FFS
2348 #define compute_bit2idx(X, I) I = ffs(X)-1
2349 
2350 #else /* USE_BUILTIN_FFS */
2351 #define compute_bit2idx(X, I)\
2352 {\
2353  unsigned int Y = X - 1;\
2354  unsigned int K = Y >> (16-4) & 16;\
2355  unsigned int N = K; Y >>= K;\
2356  N += K = Y >> (8-3) & 8; Y >>= K;\
2357  N += K = Y >> (4-2) & 4; Y >>= K;\
2358  N += K = Y >> (2-1) & 2; Y >>= K;\
2359  N += K = Y >> (1-0) & 1; Y >>= K;\
2360  I = (bindex_t)(N + Y);\
2361 }
2362 #endif /* USE_BUILTIN_FFS */
2363 #endif /* GNUC */
2364 
2365 /* isolate the least set bit of a bitmap */
2366 #define least_bit(x) ((x) & -(x))
2367 
2368 /* mask with all bits to left of least bit of x on */
2369 #define left_bits(x) ((x<<1) | -(x<<1))
2370 
2371 /* mask with all bits to left of or equal to least bit of x on */
2372 #define same_or_left_bits(x) ((x) | -(x))
2373 
2374 
2375 /* ----------------------- Runtime Check Support ------------------------- */
2376 
2377 /*
2378  For security, the main invariant is that malloc/free/etc never
2379  writes to a static address other than malloc_state, unless static
2380  malloc_state itself has been corrupted, which cannot occur via
2381  malloc (because of these checks). In essence this means that we
2382  believe all pointers, sizes, maps etc held in malloc_state, but
2383  check all of those linked or offsetted from other embedded data
2384  structures. These checks are interspersed with main code in a way
2385  that tends to minimize their run-time cost.
2386 
2387  When FOOTERS is defined, in addition to range checking, we also
2388  verify footer fields of inuse chunks, which can be used guarantee
2389  that the mstate controlling malloc/free is intact. This is a
2390  streamlined version of the approach described by William Robertson
2391  et al in "Run-time Detection of Heap-based Overflows" LISA'03
2392  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
2393  of an inuse chunk holds the xor of its mstate and a random seed,
2394  that is checked upon calls to free() and realloc(). This is
2395  (probablistically) unguessable from outside the program, but can be
2396  computed by any code successfully malloc'ing any chunk, so does not
2397  itself provide protection against code that has already broken
2398  security through some other means. Unlike Robertson et al, we
2399  always dynamically check addresses of all offset chunks (previous,
2400  next, etc). This turns out to be cheaper than relying on hashes.
2401 */
2402 
2403 #if !INSECURE
2404 /* Check if address a is at least as high as any from MORECORE or MMAP */
2405 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
2406 /* Check if address of next chunk n is higher than base chunk p */
2407 #define ok_next(p, n) ((char*)(p) < (char*)(n))
2408 /* Check if p has its cinuse bit on */
2409 #define ok_cinuse(p) cinuse(p)
2410 /* Check if p has its pinuse bit on */
2411 #define ok_pinuse(p) pinuse(p)
2412 
2413 #else /* !INSECURE */
2414 #define ok_address(M, a) (1)
2415 #define ok_next(b, n) (1)
2416 #define ok_cinuse(p) (1)
2417 #define ok_pinuse(p) (1)
2418 #endif /* !INSECURE */
2419 
2420 #if (FOOTERS && !INSECURE)
2421 /* Check if (alleged) mstate m has expected magic field */
2422 #define ok_magic(M) ((M)->magic == mparams.magic)
2423 #else /* (FOOTERS && !INSECURE) */
2424 #define ok_magic(M) (1)
2425 #endif /* (FOOTERS && !INSECURE) */
2426 
2427 
2428 /* In gcc, use __builtin_expect to minimize impact of checks */
2429 #if !INSECURE
2430 #if defined(__GNUC__) && __GNUC__ >= 3
2431 #define RTCHECK(e) __builtin_expect(e, 1)
2432 #else /* GNUC */
2433 #define RTCHECK(e) (e)
2434 #endif /* GNUC */
2435 #else /* !INSECURE */
2436 #define RTCHECK(e) (1)
2437 #endif /* !INSECURE */
2438 
2439 /* macros to set up inuse chunks with or without footers */
2440 
2441 #if !FOOTERS
2442 
2443 #define mark_inuse_foot(M,p,s)
2444 
2445 /* Set cinuse bit and pinuse bit of next chunk */
2446 #define set_inuse(M,p,s)\
2447  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2448  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2449 
2450 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
2451 #define set_inuse_and_pinuse(M,p,s)\
2452  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2453  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2454 
2455 /* Set size, cinuse and pinuse bit of this chunk */
2456 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2457  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
2458 
2459 #else /* FOOTERS */
2460 
2461 /* Set foot of inuse chunk to be xor of mstate and seed */
2462 #define mark_inuse_foot(M,p,s)\
2463  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
2464 
2465 #define get_mstate_for(p)\
2466  ((mstate)(((mchunkptr)((char*)(p) +\
2467  (chunksize(p))))->prev_foot ^ mparams.magic))
2468 
2469 #define set_inuse(M,p,s)\
2470  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2471  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
2472  mark_inuse_foot(M,p,s))
2473 
2474 #define set_inuse_and_pinuse(M,p,s)\
2475  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2476  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
2477  mark_inuse_foot(M,p,s))
2478 
2479 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2480  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2481  mark_inuse_foot(M, p, s))
2482 
2483 #endif /* !FOOTERS */
2484 
2485 /* ---------------------------- setting mparams -------------------------- */
2486 
2487 /* Initialize mparams */
2488 static int
2490 {
2491  if (mparams.page_size == 0)
2492  {
2493  size_t s;
2494 
2497 #if MORECORE_CONTIGUOUS
2499 #else /* MORECORE_CONTIGUOUS */
2501 #endif /* MORECORE_CONTIGUOUS */
2502 
2503 #if (FOOTERS && !INSECURE)
2504  {
2505 #if USE_DEV_RANDOM
2506  int fd;
2507  unsigned char buf[sizeof (size_t)];
2508  /* Try to use /dev/urandom, else fall back on using time */
2509  if ((fd = open ("/dev/urandom", O_RDONLY)) >= 0 && read (fd, buf, sizeof (buf)) == sizeof (buf))
2510  {
2511  s = *((size_t *) buf);
2512  close (fd);
2513  }
2514  else
2515 #endif /* USE_DEV_RANDOM */
2516  s = (size_t) (time (0) ^ (size_t) 0x55555555U);
2517 
2518  s |= (size_t) 8U; /* ensure nonzero */
2519  s &= ~(size_t) 7U; /* improve chances of fault for bad values */
2520 
2521  }
2522 #else /* (FOOTERS && !INSECURE) */
2523  s = (size_t) 0x58585858U;
2524 #endif /* (FOOTERS && !INSECURE) */
2526  if (mparams.magic == 0)
2527  {
2528  mparams.magic = s;
2529  /* Set up lock for main malloc area */
2530  INITIAL_LOCK (&gm->mutex);
2531  gm->mflags = mparams.default_mflags;
2532  }
2534 
2535 #ifndef WIN32
2538 #else /* WIN32 */
2539  {
2540  SYSTEM_INFO system_info;
2541  GetSystemInfo (&system_info);
2542  mparams.page_size = system_info.dwPageSize;
2543  mparams.granularity = system_info.dwAllocationGranularity;
2544  }
2545 #endif /* WIN32 */
2546 
2547  /* Sanity-check configuration:
2548  size_t must be unsigned and as wide as pointer type.
2549  ints must be at least 4 bytes.
2550  alignment must be at least 8.
2551  Alignment, min chunk size, and page size must all be powers of 2.
2552  */
2553  if ((sizeof (size_t) != sizeof (char *)) ||
2554  (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
2555  (sizeof (int) < 4) ||
2556  (MALLOC_ALIGNMENT < (size_t) 8U) ||
2557  ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
2558  ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
2560  ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
2561  ABORT;
2562  }
2563  return 0;
2564 }
2565 
2566 /* support for mallopt */
2567 static int
2568 change_mparam (int param_number, int value)
2569 {
2570  size_t val = (size_t) value;
2571  init_mparams ();
2572  switch (param_number)
2573  {
2574  case M_TRIM_THRESHOLD:
2575  mparams.trim_threshold = val;
2576  return 1;
2577  case M_GRANULARITY:
2578  if (val >= mparams.page_size && ((val & (val - 1)) == 0))
2579  {
2580  mparams.granularity = val;
2581  return 1;
2582  }
2583  else
2584  return 0;
2585  case M_MMAP_THRESHOLD:
2586  mparams.mmap_threshold = val;
2587  return 1;
2588  default:
2589  return 0;
2590  }
2591 }
2592 
2593 #if DEBUG
2594 /* ------------------------- Debugging Support --------------------------- */
2595 
2596 /* Check properties of any chunk, whether free, inuse, mmapped etc */
2597 static void
2598 do_check_any_chunk (mstate m, mchunkptr p)
2599 {
2600  assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD));
2601  assert (ok_address (m, p));
2602 }
2603 
2604 /* Check properties of top chunk */
2605 static void
2606 do_check_top_chunk (mstate m, mchunkptr p)
2607 {
2608  msegmentptr sp = segment_holding (m, (char *) p);
2609  size_t sz = chunksize (p);
2610  assert (sp != 0);
2611  assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD));
2612  assert (ok_address (m, p));
2613  assert (sz == m->topsize);
2614  assert (sz > 0);
2615  assert (sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
2616  assert (pinuse (p));
2617  assert (!next_pinuse (p));
2618 }
2619 
2620 /* Check properties of (inuse) mmapped chunks */
2621 static void
2622 do_check_mmapped_chunk (mstate m, mchunkptr p)
2623 {
2624  size_t sz = chunksize (p);
2625  size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
2626  assert (is_mmapped (p));
2627  assert (use_mmap (m));
2628  assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD));
2629  assert (ok_address (m, p));
2630  assert (!is_small (sz));
2631  assert ((len & (mparams.page_size - SIZE_T_ONE)) == 0);
2632  assert (chunk_plus_offset (p, sz)->head == FENCEPOST_HEAD);
2633  assert (chunk_plus_offset (p, sz + SIZE_T_SIZE)->head == 0);
2634 }
2635 
2636 /* Check properties of inuse chunks */
2637 static void
2638 do_check_inuse_chunk (mstate m, mchunkptr p)
2639 {
2640  do_check_any_chunk (m, p);
2641  assert (cinuse (p));
2642  assert (next_pinuse (p));
2643  /* If not pinuse and not mmapped, previous chunk has OK offset */
2644  assert (is_mmapped (p) || pinuse (p) || next_chunk (prev_chunk (p)) == p);
2645  if (is_mmapped (p))
2646  do_check_mmapped_chunk (m, p);
2647 }
2648 
2649 /* Check properties of free chunks */
2650 static void
2651 do_check_free_chunk (mstate m, mchunkptr p)
2652 {
2653  size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2654  mchunkptr next = chunk_plus_offset (p, sz);
2655  do_check_any_chunk (m, p);
2656  assert (!cinuse (p));
2657  assert (!next_pinuse (p));
2658  assert (!is_mmapped (p));
2659  if (p != m->dv && p != m->top)
2660  {
2661  if (sz >= MIN_CHUNK_SIZE)
2662  {
2663  assert ((sz & CHUNK_ALIGN_MASK) == 0);
2664  assert (is_aligned (chunk2mem (p)));
2665  assert (next->prev_foot == sz);
2666  assert (pinuse (p));
2667  assert (next == m->top || cinuse (next));
2668  assert (p->fd->bk == p);
2669  assert (p->bk->fd == p);
2670  }
2671  else /* markers are always of size SIZE_T_SIZE */
2672  assert (sz == SIZE_T_SIZE);
2673  }
2674 }
2675 
2676 /* Check properties of malloced chunks at the point they are malloced */
2677 static void
2678 do_check_malloced_chunk (mstate m, void *mem, size_t s)
2679 {
2680  if (mem != 0)
2681  {
2682  mchunkptr p = mem2chunk (mem);
2683  size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2684  do_check_inuse_chunk (m, p);
2685  assert ((sz & CHUNK_ALIGN_MASK) == 0);
2686  assert (sz >= MIN_CHUNK_SIZE);
2687  assert (sz >= s);
2688  /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
2689  assert (is_mmapped (p) || sz < (s + MIN_CHUNK_SIZE));
2690  }
2691 }
2692 
2693 /* Check a tree and its subtrees. */
2694 static void
2695 do_check_tree (mstate m, tchunkptr t)
2696 {
2697  tchunkptr head = 0;
2698  tchunkptr u = t;
2699  bindex_t tindex = t->index;
2700  size_t tsize = chunksize (t);
2701  bindex_t idx;
2702  compute_tree_index (tsize, idx);
2703  assert (tindex == idx);
2704  assert (tsize >= MIN_LARGE_SIZE);
2705  assert (tsize >= minsize_for_tree_index (idx));
2706  assert ((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index ((idx + 1))));
2707 
2708  do
2709  { /* traverse through chain of same-sized nodes */
2710  do_check_any_chunk (m, ((mchunkptr) u));
2711  assert (u->index == tindex);
2712  assert (chunksize (u) == tsize);
2713  assert (!cinuse (u));
2714  assert (!next_pinuse (u));
2715  assert (u->fd->bk == u);
2716  assert (u->bk->fd == u);
2717  if (u->parent == 0)
2718  {
2719  assert (u->child[0] == 0);
2720  assert (u->child[1] == 0);
2721  }
2722  else
2723  {
2724  assert (head == 0); /* only one node on chain has parent */
2725  head = u;
2726  assert (u->parent != u);
2727  assert (u->parent->child[0] == u || u->parent->child[1] == u || *((tbinptr *) (u->parent)) == u);
2728  if (u->child[0] != 0)
2729  {
2730  assert (u->child[0]->parent == u);
2731  assert (u->child[0] != u);
2732  do_check_tree (m, u->child[0]);
2733  }
2734  if (u->child[1] != 0)
2735  {
2736  assert (u->child[1]->parent == u);
2737  assert (u->child[1] != u);
2738  do_check_tree (m, u->child[1]);
2739  }
2740  if (u->child[0] != 0 && u->child[1] != 0)
2741  {
2742  assert (chunksize (u->child[0]) < chunksize (u->child[1]));
2743  }
2744  }
2745  u = u->fd;
2746  }
2747  while (u != t);
2748  assert (head != 0);
2749 }
2750 
2751 /* Check all the chunks in a treebin. */
2752 static void
2753 do_check_treebin (mstate m, bindex_t i)
2754 {
2755  tbinptr *tb = treebin_at (m, i);
2756  tchunkptr t = *tb;
2757  int empty = (m->treemap & (1U << i)) == 0;
2758  if (t == 0)
2759  assert (empty);
2760  if (!empty)
2761  do_check_tree (m, t);
2762 }
2763 
2764 /* Check all the chunks in a smallbin. */
2765 static void
2766 do_check_smallbin (mstate m, bindex_t i)
2767 {
2768  sbinptr b = smallbin_at (m, i);
2769  mchunkptr p = b->bk;
2770  unsigned int empty = (m->smallmap & (1U << i)) == 0;
2771  if (p == b)
2772  assert (empty);
2773  if (!empty)
2774  {
2775  for (; p != b; p = p->bk)
2776  {
2777  size_t size = chunksize (p);
2778  mchunkptr q;
2779  /* each chunk claims to be free */
2780  do_check_free_chunk (m, p);
2781  /* chunk belongs in bin */
2782  assert (small_index (size) == i);
2783  assert (p->bk == b || chunksize (p->bk) == chunksize (p));
2784  /* chunk is followed by an inuse chunk */
2785  q = next_chunk (p);
2786  if (q->head != FENCEPOST_HEAD)
2787  do_check_inuse_chunk (m, q);
2788  }
2789  }
2790 }
2791 
2792 /* Find x in a bin. Used in other check functions. */
2793 static int
2794 bin_find (mstate m, mchunkptr x)
2795 {
2796  size_t size = chunksize (x);
2797  if (is_small (size))
2798  {
2799  bindex_t sidx = small_index (size);
2800  sbinptr b = smallbin_at (m, sidx);
2801  if (smallmap_is_marked (m, sidx))
2802  {
2803  mchunkptr p = b;
2804  do
2805  {
2806  if (p == x)
2807  return 1;
2808  }
2809  while ((p = p->fd) != b);
2810  }
2811  }
2812  else
2813  {
2814  bindex_t tidx;
2815  compute_tree_index (size, tidx);
2816  if (treemap_is_marked (m, tidx))
2817  {
2818  tchunkptr t = *treebin_at (m, tidx);
2819  size_t sizebits = size << leftshift_for_tree_index (tidx);
2820  while (t != 0 && chunksize (t) != size)
2821  {
2822  t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
2823  sizebits <<= 1;
2824  }
2825  if (t != 0)
2826  {
2827  tchunkptr u = t;
2828  do
2829  {
2830  if (u == (tchunkptr) x)
2831  return 1;
2832  }
2833  while ((u = u->fd) != t);
2834  }
2835  }
2836  }
2837  return 0;
2838 }
2839 
2840 /* Traverse each chunk and check it; return total */
2841 static size_t
2842 traverse_and_check (mstate m)
2843 {
2844  size_t sum = 0;
2845  if (is_initialized (m))
2846  {
2847  msegmentptr s = &m->seg;
2848  sum += m->topsize + TOP_FOOT_SIZE;
2849  while (s != 0)
2850  {
2851  mchunkptr q = align_as_chunk (s->base);
2852  mchunkptr lastq = 0;
2853  assert (pinuse (q));
2854  while (segment_holds (s, q) && q != m->top && q->head != FENCEPOST_HEAD)
2855  {
2856  sum += chunksize (q);
2857  if (cinuse (q))
2858  {
2859  assert (!bin_find (m, q));
2860  do_check_inuse_chunk (m, q);
2861  }
2862  else
2863  {
2864  assert (q == m->dv || bin_find (m, q));
2865  assert (lastq == 0 || cinuse (lastq)); /* Not 2 consecutive free */
2866  do_check_free_chunk (m, q);
2867  }
2868  lastq = q;
2869  q = next_chunk (q);
2870  }
2871  s = s->next;
2872  }
2873  }
2874  return sum;
2875 }
2876 
2877 /* Check all properties of malloc_state. */
2878 static void
2879 do_check_malloc_state (mstate m)
2880 {
2881  bindex_t i;
2882  size_t total;
2883  /* check bins */
2884  for (i = 0; i < NSMALLBINS; ++i)
2885  do_check_smallbin (m, i);
2886  for (i = 0; i < NTREEBINS; ++i)
2887  do_check_treebin (m, i);
2888 
2889  if (m->dvsize != 0)
2890  { /* check dv chunk */
2891  do_check_any_chunk (m, m->dv);
2892  assert (m->dvsize == chunksize (m->dv));
2893  assert (m->dvsize >= MIN_CHUNK_SIZE);
2894  assert (bin_find (m, m->dv) == 0);
2895  }
2896 
2897  if (m->top != 0)
2898  { /* check top chunk */
2899  do_check_top_chunk (m, m->top);
2900  assert (m->topsize == chunksize (m->top));
2901  assert (m->topsize > 0);
2902  assert (bin_find (m, m->top) == 0);
2903  }
2904 
2905  total = traverse_and_check (m);
2906  assert (total <= m->footprint);
2907  assert (m->footprint <= m->max_footprint);
2908 }
2909 #endif /* DEBUG */
2910 
2911 /* ----------------------------- statistics ------------------------------ */
2912 
2913 #if !NO_MALLINFO
2914 static struct mallinfo
2916 {
2917  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2918  if (!PREACTION (m))
2919  {
2920  check_malloc_state (m);
2921  if (is_initialized (m))
2922  {
2923  size_t nfree = SIZE_T_ONE; /* top always free */
2924  size_t mfree = m->topsize + TOP_FOOT_SIZE;
2925  size_t sum = mfree;
2926  msegmentptr s = &m->seg;
2927  while (s != 0)
2928  {
2929  mchunkptr q = align_as_chunk (s->base);
2930  while (segment_holds (s, q) && q != m->top && q->head != FENCEPOST_HEAD)
2931  {
2932  size_t sz = chunksize (q);
2933  sum += sz;
2934  if (!cinuse (q))
2935  {
2936  mfree += sz;
2937  ++nfree;
2938  }
2939  q = next_chunk (q);
2940  }
2941  s = s->next;
2942  }
2943 
2944  nm.arena = sum;
2945  nm.ordblks = nfree;
2946  nm.hblkhd = m->footprint - sum;
2947  nm.usmblks = m->max_footprint;
2948  nm.uordblks = m->footprint - mfree;
2949  nm.fordblks = mfree;
2950  nm.keepcost = m->topsize;
2951  }
2952 
2953  POSTACTION (m);
2954  }
2955  return nm;
2956 }
2957 #endif /* !NO_MALLINFO */
2958 
2959 static void
2961 {
2962  if (!PREACTION (m))
2963  {
2964  size_t maxfp = 0;
2965  size_t fp = 0;
2966  size_t used = 0;
2967  check_malloc_state (m);
2968  if (is_initialized (m))
2969  {
2970  msegmentptr s = &m->seg;
2971  maxfp = m->max_footprint;
2972  fp = m->footprint;
2973  used = fp - (m->topsize + TOP_FOOT_SIZE);
2974 
2975  while (s != 0)
2976  {
2977  mchunkptr q = align_as_chunk (s->base);
2978  while (segment_holds (s, q) && q != m->top && q->head != FENCEPOST_HEAD)
2979  {
2980  if (!cinuse (q))
2981  used -= chunksize (q);
2982  q = next_chunk (q);
2983  }
2984  s = s->next;
2985  }
2986  }
2987 
2988  fprintf (stderr, "max system bytes = %10lu\n", (unsigned long) (maxfp));
2989  fprintf (stderr, "system bytes = %10lu\n", (unsigned long) (fp));
2990  fprintf (stderr, "in use bytes = %10lu\n", (unsigned long) (used));
2991 
2992  POSTACTION (m);
2993  }
2994 }
2995 
2996 /* ----------------------- Operations on smallbins ----------------------- */
2997 
2998 /*
2999  Various forms of linking and unlinking are defined as macros. Even
3000  the ones for trees, which are very long but have very short typical
3001  paths. This is ugly but reduces reliance on inlining support of
3002  compilers.
3003 */
3004 
3005 /* Link a free chunk into a smallbin */
3006 #define insert_small_chunk(M, P, S) {\
3007  bindex_t I = (bindex_t)small_index(S);\
3008  mchunkptr B = smallbin_at(M, I);\
3009  mchunkptr F = B;\
3010  assert(S >= MIN_CHUNK_SIZE);\
3011  if (!smallmap_is_marked(M, I))\
3012  mark_smallmap(M, I);\
3013  else if (RTCHECK(ok_address(M, B->fd)))\
3014  F = B->fd;\
3015  else {\
3016  CORRUPTION_ERROR_ACTION(M);\
3017  }\
3018  B->fd = P;\
3019  F->bk = P;\
3020  P->fd = F;\
3021  P->bk = B;\
3022 }
3023 
3024 /* Unlink a chunk from a smallbin */
3025 #define unlink_small_chunk(M, P, S) {\
3026  mchunkptr F = P->fd;\
3027  mchunkptr B = P->bk;\
3028  bindex_t I = small_index(S);\
3029  assert(P != B);\
3030  assert(P != F);\
3031  assert(chunksize(P) == small_index2size(I));\
3032  if (F == B)\
3033  clear_smallmap(M, I);\
3034  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
3035  (B == smallbin_at(M,I) || ok_address(M, B)))) {\
3036  F->bk = B;\
3037  B->fd = F;\
3038  }\
3039  else {\
3040  CORRUPTION_ERROR_ACTION(M);\
3041  }\
3042 }
3043 
3044 /* Unlink the first chunk from a smallbin */
3045 #define unlink_first_small_chunk(M, B, P, I) {\
3046  mchunkptr F = P->fd;\
3047  assert(P != B);\
3048  assert(P != F);\
3049  assert(chunksize(P) == small_index2size(I));\
3050  if (B == F)\
3051  clear_smallmap(M, I);\
3052  else if (RTCHECK(ok_address(M, F))) {\
3053  B->fd = F;\
3054  F->bk = B;\
3055  }\
3056  else {\
3057  CORRUPTION_ERROR_ACTION(M);\
3058  }\
3059 }
3060 
3061 /* Replace dv node, binning the old one */
3062 /* Used only when dvsize known to be small */
3063 #define replace_dv(M, P, S) {\
3064  size_t DVS = M->dvsize;\
3065  if (DVS != 0) {\
3066  mchunkptr DV = M->dv;\
3067  assert(is_small(DVS));\
3068  insert_small_chunk(M, DV, DVS);\
3069  }\
3070  M->dvsize = S;\
3071  M->dv = P;\
3072 }
3073 
3074 /* ------------------------- Operations on trees ------------------------- */
3075 
3076 /* Insert chunk into tree */
3077 #define insert_large_chunk(M, X, S) {\
3078  tbinptr* H;\
3079  bindex_t I;\
3080  compute_tree_index(S, I);\
3081  H = treebin_at(M, I);\
3082  X->index = I;\
3083  X->child[0] = X->child[1] = 0;\
3084  if (!treemap_is_marked(M, I)) {\
3085  mark_treemap(M, I);\
3086  *H = X;\
3087  X->parent = (tchunkptr)H;\
3088  X->fd = X->bk = X;\
3089  }\
3090  else {\
3091  tchunkptr T = *H;\
3092  size_t K = S << leftshift_for_tree_index(I);\
3093  for (;;) {\
3094  if (chunksize(T) != S) {\
3095  tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3096  K <<= 1;\
3097  if (*C != 0)\
3098  T = *C;\
3099  else if (RTCHECK(ok_address(M, C))) {\
3100  *C = X;\
3101  X->parent = T;\
3102  X->fd = X->bk = X;\
3103  break;\
3104  }\
3105  else {\
3106  CORRUPTION_ERROR_ACTION(M);\
3107  break;\
3108  }\
3109  }\
3110  else {\
3111  tchunkptr F = T->fd;\
3112  if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3113  T->fd = F->bk = X;\
3114  X->fd = F;\
3115  X->bk = T;\
3116  X->parent = 0;\
3117  break;\
3118  }\
3119  else {\
3120  CORRUPTION_ERROR_ACTION(M);\
3121  break;\
3122  }\
3123  }\
3124  }\
3125  }\
3126 }
3127 
3128 /*
3129  Unlink steps:
3130 
3131  1. If x is a chained node, unlink it from its same-sized fd/bk links
3132  and choose its bk node as its replacement.
3133  2. If x was the last node of its size, but not a leaf node, it must
3134  be replaced with a leaf node (not merely one with an open left or
3135  right), to make sure that lefts and rights of descendents
3136  correspond properly to bit masks. We use the rightmost descendent
3137  of x. We could use any other leaf, but this is easy to locate and
3138  tends to counteract removal of leftmosts elsewhere, and so keeps
3139  paths shorter than minimally guaranteed. This doesn't loop much
3140  because on average a node in a tree is near the bottom.
3141  3. If x is the base of a chain (i.e., has parent links) relink
3142  x's parent and children to x's replacement (or null if none).
3143 */
3144 
3145 #define unlink_large_chunk(M, X) {\
3146  tchunkptr XP = X->parent;\
3147  tchunkptr R;\
3148  if (X->bk != X) {\
3149  tchunkptr F = X->fd;\
3150  R = X->bk;\
3151  if (RTCHECK(ok_address(M, F))) {\
3152  F->bk = R;\
3153  R->fd = F;\
3154  }\
3155  else {\
3156  CORRUPTION_ERROR_ACTION(M);\
3157  }\
3158  }\
3159  else {\
3160  tchunkptr* RP;\
3161  if (((R = *(RP = &(X->child[1]))) != 0) ||\
3162  ((R = *(RP = &(X->child[0]))) != 0)) {\
3163  tchunkptr* CP;\
3164  while ((*(CP = &(R->child[1])) != 0) ||\
3165  (*(CP = &(R->child[0])) != 0)) {\
3166  R = *(RP = CP);\
3167  }\
3168  if (RTCHECK(ok_address(M, RP)))\
3169  *RP = 0;\
3170  else {\
3171  CORRUPTION_ERROR_ACTION(M);\
3172  }\
3173  }\
3174  }\
3175  if (XP != 0) {\
3176  tbinptr* H = treebin_at(M, X->index);\
3177  if (X == *H) {\
3178  if ((*H = R) == 0) \
3179  clear_treemap(M, X->index);\
3180  }\
3181  else if (RTCHECK(ok_address(M, XP))) {\
3182  if (XP->child[0] == X) \
3183  XP->child[0] = R;\
3184  else \
3185  XP->child[1] = R;\
3186  }\
3187  else\
3188  CORRUPTION_ERROR_ACTION(M);\
3189  if (R != 0) {\
3190  if (RTCHECK(ok_address(M, R))) {\
3191  tchunkptr C0, C1;\
3192  R->parent = XP;\
3193  if ((C0 = X->child[0]) != 0) {\
3194  if (RTCHECK(ok_address(M, C0))) {\
3195  R->child[0] = C0;\
3196  C0->parent = R;\
3197  }\
3198  else\
3199  CORRUPTION_ERROR_ACTION(M);\
3200  }\
3201  if ((C1 = X->child[1]) != 0) {\
3202  if (RTCHECK(ok_address(M, C1))) {\
3203  R->child[1] = C1;\
3204  C1->parent = R;\
3205  }\
3206  else\
3207  CORRUPTION_ERROR_ACTION(M);\
3208  }\
3209  }\
3210  else\
3211  CORRUPTION_ERROR_ACTION(M);\
3212  }\
3213  }\
3214 }
3215 
3216 /* Relays to large vs small bin operations */
3217 
3218 #define insert_chunk(M, P, S)\
3219  if (is_small(S)) insert_small_chunk(M, P, S)\
3220  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3221 
3222 #define unlink_chunk(M, P, S)\
3223  if (is_small(S)) unlink_small_chunk(M, P, S)\
3224  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3225 
3226 
3227 /* Relays to internal calls to malloc/free from realloc, memalign etc */
3228 
3229 #if ONLY_MSPACES
3230 #define internal_malloc(m, b) mspace_malloc(m, b)
3231 #define internal_free(m, mem) mspace_free(m,mem);
3232 #else /* ONLY_MSPACES */
3233 #if MSPACES
3234 #define internal_malloc(m, b)\
3235  (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
3236 #define internal_free(m, mem)\
3237  if (m == gm) dlfree(mem); else mspace_free(m,mem);
3238 #else /* MSPACES */
3239 #define internal_malloc(m, b) dlmalloc(b)
3240 #define internal_free(m, mem) dlfree(mem)
3241 #endif /* MSPACES */
3242 #endif /* ONLY_MSPACES */
3243 
3244 /* ----------------------- Direct-mmapping chunks ----------------------- */
3245 
3246 /*
3247  Directly mmapped chunks are set up with an offset to the start of
3248  the mmapped region stored in the prev_foot field of the chunk. This
3249  allows reconstruction of the required argument to MUNMAP when freed,
3250  and also allows adjustment of the returned chunk to meet alignment
3251  requirements (especially in memalign). There is also enough space
3252  allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
3253  the PINUSE bit so frees can be checked.
3254 */
3255 
3256 /* Malloc using mmap */
3257 static void *
3258 mmap_alloc (mstate m, size_t nb)
3259 {
3260 #if defined(ENABLE_SEPARATE_MMAP_EVENT_TRACE)
3262 #else
3263  size_t mmsize = granularity_align (nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3264 #endif
3265  if (mmsize > nb)
3266  { /* Check for wrap around 0 */
3267  char *mm = (char *) (DIRECT_MMAP (mmsize));
3268  if (mm != CMFAIL)
3269  {
3270  size_t offset = align_offset (chunk2mem (mm));
3271  size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3272  mchunkptr p = (mchunkptr) (mm + offset);
3273  p->prev_foot = offset | IS_MMAPPED_BIT;
3274  (p)->head = (psize | CINUSE_BIT);
3275  mark_inuse_foot (m, p, psize);
3276  chunk_plus_offset (p, psize)->head = FENCEPOST_HEAD;
3277  chunk_plus_offset (p, psize + SIZE_T_SIZE)->head = 0;
3278 
3279  if (mm < m->least_addr)
3280  m->least_addr = mm;
3281  if ((m->footprint += mmsize) > m->max_footprint)
3282  m->max_footprint = m->footprint;
3283  assert (is_aligned (chunk2mem (p)));
3284  check_mmapped_chunk (m, p);
3285 #if defined(ENABLE_SEPARATE_MMAP_EVENT_TRACE)
3286  do
3287  {
3288  void *h;
3289  h = chunk_plus_offset (p, psize - sizeof (MMAP_TRACE_H));
3290  mmap_called (m, mm, (MMAP_TRACE_H *) h);
3291  }
3292  while (0);
3293 #endif
3294  return chunk2mem (p);
3295  }
3296  }
3297  return 0;
3298 }
3299 
3300 /* Realloc using mmap */
3301 static mchunkptr
3302 mmap_resize (mstate m, mchunkptr oldp, size_t nb)
3303 {
3304  size_t oldsize = chunksize (oldp);
3305  if (is_small (nb)) /* Can't shrink mmap regions below small size */
3306  return 0;
3307  /* Keep old chunk if big enough but not too big */
3308 #if defined(ENABLE_SEPARATE_MMAP_EVENT_TRACE)
3309  if (oldsize >= nb + SIZE_T_SIZE + MMAP_TRACE_H_SIZE && (oldsize - nb) <= (mparams.granularity << 1))
3310  return oldp;
3311 #else
3312  if (oldsize >= nb + SIZE_T_SIZE && (oldsize - nb) <= (mparams.granularity << 1))
3313  return oldp;
3314 #endif /* ENABLE_SEPARATE_MMAP_EVENT_TRACE */
3315  else
3316  {
3317  size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3318  size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3319  size_t newmmsize = granularity_align (nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3320  char *cp = (char *) CALL_MREMAP ((char *) oldp - offset,
3321  oldmmsize, newmmsize, 1);
3322  if (cp != CMFAIL)
3323  {
3324  mchunkptr newp = (mchunkptr) (cp + offset);
3325  size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3326  newp->head = (psize | CINUSE_BIT);
3327  mark_inuse_foot (m, newp, psize);
3328  chunk_plus_offset (newp, psize)->head = FENCEPOST_HEAD;
3329  chunk_plus_offset (newp, psize + SIZE_T_SIZE)->head = 0;
3330 
3331  if (cp < m->least_addr)
3332  m->least_addr = cp;
3333  if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3334  m->max_footprint = m->footprint;
3335  check_mmapped_chunk (m, newp);
3336  return newp;
3337  }
3338  }
3339  return 0;
3340 }
3341 
3342 /* -------------------------- mspace management -------------------------- */
3343 
3344 /* Initialize top chunk and its size */
3345 static void
3346 init_top (mstate m, mchunkptr p, size_t psize)
3347 {
3348  /* Ensure alignment */
3349  size_t offset = align_offset (chunk2mem (p));
3350  p = (mchunkptr) ((char *) p + offset);
3351  psize -= offset;
3352 
3353  m->top = p;
3354  m->topsize = psize;
3355  p->head = psize | PINUSE_BIT;
3356  /* set size of fake trailing chunk holding overhead space only once */
3357  chunk_plus_offset (p, psize)->head = TOP_FOOT_SIZE;
3358  m->trim_check = mparams.trim_threshold; /* reset on each update */
3359 }
3360 
3361 /* Initialize bins for a new mstate that is otherwise zeroed out */
3362 static void
3363 init_bins (mstate m)
3364 {
3365  /* Establish circular links for smallbins */
3366  bindex_t i;
3367  for (i = 0; i < NSMALLBINS; ++i)
3368  {
3369  sbinptr bin = smallbin_at (m, i);
3370  bin->fd = bin->bk = bin;
3371  }
3372 }
3373 
3374 #if PROCEED_ON_ERROR
3375 
3376 /* default corruption action */
3377 static void
3378 reset_on_error (mstate m)
3379 {
3380  int i;
3381  ++malloc_corruption_error_count;
3382  /* Reinitialize fields to forget about all memory */
3383  m->smallbins = m->treebins = 0;
3384  m->dvsize = m->topsize = 0;
3385  m->seg.base = 0;
3386  m->seg.size = 0;
3387  m->seg.next = 0;
3388  m->top = m->dv = 0;
3389  for (i = 0; i < NTREEBINS; ++i)
3390  *treebin_at (m, i) = 0;
3391  init_bins (m);
3392 }
3393 #endif /* PROCEED_ON_ERROR */
3394 
3395 /* Allocate chunk and prepend remainder with chunk in successor base. */
3396 static void *
3397 prepend_alloc (mstate m, char *newbase, char *oldbase, size_t nb)
3398 {
3399  mchunkptr p = align_as_chunk (newbase);
3400  mchunkptr oldfirst = align_as_chunk (oldbase);
3401  size_t psize = (char *) oldfirst - (char *) p;
3402  mchunkptr q = chunk_plus_offset (p, nb);
3403  size_t qsize = psize - nb;
3405 
3406  assert ((char *) oldfirst > (char *) q);
3407  assert (pinuse (oldfirst));
3408  assert (qsize >= MIN_CHUNK_SIZE);
3409 
3410  /* consolidate remainder with first chunk of old base */
3411  if (oldfirst == m->top)
3412  {
3413  size_t tsize = m->topsize += qsize;
3414  m->top = q;
3415  q->head = tsize | PINUSE_BIT;
3416  check_top_chunk (m, q);
3417  }
3418  else if (oldfirst == m->dv)
3419  {
3420  size_t dsize = m->dvsize += qsize;
3421  m->dv = q;
3423  }
3424  else
3425  {
3426  if (!cinuse (oldfirst))
3427  {
3428  size_t nsize = chunksize (oldfirst);
3429  unlink_chunk (m, oldfirst, (bindex_t) nsize);
3430  oldfirst = chunk_plus_offset (oldfirst, nsize);
3431  qsize += nsize;
3432  }
3433  set_free_with_pinuse (q, qsize, oldfirst);
3434  insert_chunk (m, q, (bindex_t) qsize);
3435  check_free_chunk (m, q);
3436  }
3437 
3438  check_malloced_chunk (m, chunk2mem (p), nb);
3439  return chunk2mem (p);
3440 }
3441 
3442 
3443 /* Add a segment to hold a new noncontiguous region */
3444 static void
3445 add_segment (mstate m, char *tbase, size_t tsize, flag_t mmapped)
3446 {
3447  /* Determine locations and sizes of segment, fenceposts, old top */
3448  char *old_top = (char *) m->top;
3449  msegmentptr oldsp = segment_holding (m, old_top);
3450  char *old_end = oldsp->base + oldsp->size;
3451  size_t ssize = pad_request (sizeof (struct malloc_segment));
3452  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3453  size_t offset = align_offset (chunk2mem (rawsp));
3454  char *asp = rawsp + offset;
3455  char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
3456  mchunkptr sp = (mchunkptr) csp;
3457  msegmentptr ss = (msegmentptr) (chunk2mem (sp));
3458  mchunkptr tnext = chunk_plus_offset (sp, ssize);
3459  mchunkptr p = tnext;
3460  int nfences = 0;
3461 
3462  /* reset top to new space */
3463  init_top (m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3464 
3465  /* Set up segment record */
3466  assert (is_aligned (ss));
3467  set_size_and_pinuse_of_inuse_chunk (m, sp, ssize);
3468  *ss = m->seg; /* Push current record */
3469  m->seg.base = tbase;
3470  m->seg.size = tsize;
3471  m->seg.sflags = mmapped;
3472  m->seg.next = ss;
3473 
3474  /* Insert trailing fenceposts */
3475  for (;;)
3476  {
3477  mchunkptr nextp = chunk_plus_offset (p, SIZE_T_SIZE);
3478  p->head = FENCEPOST_HEAD;
3479  ++nfences;
3480  if ((char *) (&(nextp->head)) < old_end)
3481  p = nextp;
3482  else
3483  break;
3484  }
3485  assert (nfences >= 2);
3486 
3487  /* Insert the rest of old top into a bin as an ordinary free chunk */
3488  if (csp != old_top)
3489  {
3490  mchunkptr q = (mchunkptr) old_top;
3491  size_t psize = csp - old_top;
3492  mchunkptr tn = chunk_plus_offset (q, psize);
3493  set_free_with_pinuse (q, psize, tn);
3494  insert_chunk (m, q, (bindex_t) psize);
3495  }
3496 
3497  check_top_chunk (m, m->top);
3498 }
3499 
3500 /* -------------------------- System allocation -------------------------- */
3501 
3502 /* Get memory from system using MORECORE or MMAP */
3503 static void *
3504 sys_alloc (mstate m, size_t nb)
3505 {
3506  char *tbase = CMFAIL;
3507  size_t tsize = 0;
3508  flag_t mmap_flag = 0;
3509 
3510  init_mparams ();
3511 
3512  /* Directly map large chunks */
3513  if (use_mmap (m) && nb >= mparams.mmap_threshold)
3514  {
3515  void *mem = mmap_alloc (m, nb);
3516  if (mem != 0)
3517  return mem;
3518  }
3519 
3520  /*
3521  Try getting memory in any of three ways (in most-preferred to
3522  least-preferred order):
3523  1. A call to MORECORE that can normally contiguously extend memory.
3524  (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
3525  or main space is mmapped or a previous contiguous call failed)
3526  2. A call to MMAP new space (disabled if not HAVE_MMAP).
3527  Note that under the default settings, if MORECORE is unable to
3528  fulfill a request, and HAVE_MMAP is true, then mmap is
3529  used as a noncontiguous system allocator. This is a useful backup
3530  strategy for systems with holes in address spaces -- in this case
3531  sbrk cannot contiguously expand the heap, but mmap may be able to
3532  find space.
3533  3. A call to MORECORE that cannot usually contiguously extend memory.
3534  (disabled if not HAVE_MORECORE)
3535  */
3536 
3538  {
3539  char *br = CMFAIL;
3540  msegmentptr ss = (m->top == 0) ? 0 : segment_holding (m, (char *) m->top);
3541  size_t asize = 0;
3543 
3544  if (ss == 0)
3545  { /* First time through or recovery */
3546  char *base = (char *) CALL_MORECORE (0);
3547  if (base != CMFAIL)
3548  {
3549  asize = granularity_align (nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3550  /* Adjust to end on a page boundary */
3551  if (!is_page_aligned (base))
3552  asize += (page_align ((size_t) base) - (size_t) base);
3553  /* Can't call MORECORE if size is negative when treated as signed */
3554  if (asize < HALF_MAX_SIZE_T && (br = (char *) (CALL_MORECORE (asize))) == base)
3555  {
3556  tbase = base;
3557  tsize = asize;
3558  }
3559  }
3560  }
3561  else
3562  {
3563  /* Subtract out existing available top space from MORECORE request. */
3564  asize = granularity_align (nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
3565  /* Use mem here only if it did continuously extend old space */
3566  if (asize < HALF_MAX_SIZE_T && (br = (char *) (CALL_MORECORE (asize))) == ss->base + ss->size)
3567  {
3568  tbase = br;
3569  tsize = asize;
3570  }
3571  }
3572 
3573  if (tbase == CMFAIL)
3574  { /* Cope with partial failure */
3575  if (br != CMFAIL)
3576  { /* Try to use/extend the space we did get */
3577  if (asize < HALF_MAX_SIZE_T && asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE)
3578  {
3579  size_t esize = granularity_align (nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
3580  if (esize < HALF_MAX_SIZE_T)
3581  {
3582  char *end = (char *) CALL_MORECORE (esize);
3583  if (end != CMFAIL)
3584  asize += esize;
3585  else
3586  { /* Can't use; try to release */
3587  CALL_MORECORE (-asize);
3588  br = CMFAIL;
3589  }
3590  }
3591  }
3592  }
3593  if (br != CMFAIL)
3594  { /* Use the space we did get */
3595  tbase = br;
3596  tsize = asize;
3597  }
3598  else
3599  disable_contiguous (m); /* Don't try contiguous path in the future */
3600  }
3601 
3603  }
3604 
3605  if (HAVE_MMAP && tbase == CMFAIL)
3606  { /* Try MMAP */
3607  size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
3608  size_t rsize = granularity_align (req);
3609  if (rsize > nb)
3610  { /* Fail if wraps around zero */
3611  char *mp = (char *) (CALL_MMAP (rsize));
3612  if (mp != CMFAIL)
3613  {
3614  tbase = mp;
3615  tsize = rsize;
3616  mmap_flag = IS_MMAPPED_BIT;
3617  }
3618  }
3619  }
3620 
3621  if (HAVE_MORECORE && tbase == CMFAIL)
3622  { /* Try noncontiguous MORECORE */
3623  size_t asize = granularity_align (nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3624  if (asize < HALF_MAX_SIZE_T)
3625  {
3626  char *br = CMFAIL;
3627  char *end = CMFAIL;
3629  br = (char *) (CALL_MORECORE (asize));
3630  end = (char *) (CALL_MORECORE (0));
3632  if (br != CMFAIL && end != CMFAIL && br < end)
3633  {
3634  size_t ssize = end - br;
3635  if (ssize > nb + TOP_FOOT_SIZE)
3636  {
3637  tbase = br;
3638  tsize = ssize;
3639  }
3640  }
3641  }
3642  }
3643 
3644  if (tbase != CMFAIL)
3645  {
3646 
3647  if ((m->footprint += tsize) > m->max_footprint)
3648  m->max_footprint = m->footprint;
3649 
3650  if (!is_initialized (m))
3651  { /* first-time initialization */
3652  m->seg.base = m->least_addr = tbase;
3653  m->seg.size = tsize;
3654  m->seg.sflags = mmap_flag;
3655  m->magic = mparams.magic;
3656  init_bins (m);
3657  if (is_global (m))
3658  init_top (m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3659  else
3660  {
3661  /* Offset top by embedded malloc_state */
3662  mchunkptr mn = next_chunk (mem2chunk (m));
3663  init_top (m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
3664  }
3665  }
3666 
3667  else
3668  {
3669  /* Try to merge with an existing segment */
3670  msegmentptr sp = &m->seg;
3671  while (sp != 0 && tbase != sp->base + sp->size)
3672  sp = sp->next;
3673  if (sp != 0 &&
3674  !is_extern_segment (sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds (sp, m->top))
3675  { /* append */
3676  sp->size += tsize;
3677  init_top (m, m->top, m->topsize + tsize);
3678  }
3679  else
3680  {
3681  if (tbase < m->least_addr)
3682  m->least_addr = tbase;
3683  sp = &m->seg;
3684  while (sp != 0 && sp->base != tbase + tsize)
3685  sp = sp->next;
3686  if (sp != 0 && !is_extern_segment (sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag)
3687  {
3688  char *oldbase = sp->base;
3689  sp->base = tbase;
3690  sp->size += tsize;
3691  return prepend_alloc (m, tbase, oldbase, nb);
3692  }
3693  else
3694  add_segment (m, tbase, tsize, mmap_flag);
3695  }
3696  }
3697 
3698  if (nb < m->topsize)
3699  { /* Allocate from new or extended top space */
3700  size_t rsize = m->topsize -= nb;
3701  mchunkptr p = m->top;
3702  mchunkptr r = m->top = chunk_plus_offset (p, nb);
3703  r->head = rsize | PINUSE_BIT;
3705  check_top_chunk (m, m->top);
3706  check_malloced_chunk (m, chunk2mem (p), nb);
3707  return chunk2mem (p);
3708  }
3709  }
3710 
3712  return 0;
3713 }
3714 
3715 /* ----------------------- system deallocation -------------------------- */
3716 
3717 /* Unmap and unlink any mmapped segments that don't contain used chunks */
3718 static size_t
3720 {
3721  size_t released = 0;
3722  msegmentptr pred = &m->seg;
3723  msegmentptr sp = pred->next;
3724  while (sp != 0)
3725  {
3726  char *base = sp->base;
3727  size_t size = sp->size;
3728  msegmentptr next = sp->next;
3729  if (is_mmapped_segment (sp) && !is_extern_segment (sp))
3730  {
3731  mchunkptr p = align_as_chunk (base);
3732  size_t psize = chunksize (p);
3733  /* Can unmap if first chunk holds entire segment and not pinned */
3734  if (!cinuse (p) && (char *) p + psize >= base + size - TOP_FOOT_SIZE)
3735  {
3736  tchunkptr tp = (tchunkptr) p;
3737  assert (segment_holds (sp, (char *) sp));
3738  if (p == m->dv)
3739  {
3740  m->dv = 0;
3741  m->dvsize = 0;
3742  }
3743  else
3744  {
3745  unlink_large_chunk (m, tp);
3746  }
3747  if (CALL_MUNMAP (base, size) == 0)
3748  {
3749  released += size;
3750  m->footprint -= size;
3751  /* unlink obsoleted record */
3752  sp = pred;
3753  sp->next = next;
3754  }
3755  else
3756  { /* back out if cannot unmap */
3757  insert_large_chunk (m, tp, psize);
3758  }
3759  }
3760  }
3761  pred = sp;
3762  sp = next;
3763  }
3764  return released;
3765 }
3766 
3767 static int
3768 sys_trim (mstate m, size_t pad)
3769 {
3770  size_t released = 0;
3771  if (pad < MAX_REQUEST && is_initialized (m))
3772  {
3773  pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
3774 
3775  if (m->topsize > pad)
3776  {
3777  /* Shrink top space in granularity-size units, keeping at least one */
3778  size_t unit = mparams.granularity;
3779  size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit;
3780  msegmentptr sp = segment_holding (m, (char *) m->top);
3781 
3782  if (!is_extern_segment (sp))
3783  {
3784  if (is_mmapped_segment (sp))
3785  {
3786  if (HAVE_MMAP && sp->size >= extra && !has_segment_link (m, sp))
3787  { /* can't shrink if pinned */
3788  size_t newsize = sp->size - extra;
3789  /* Prefer mremap, fall back to munmap */
3790  if ((CALL_MREMAP (sp->base, sp->size, newsize, 0) !=
3791  MFAIL) || (CALL_MUNMAP (sp->base + newsize, extra) == 0))
3792  {
3793  released = extra;
3794  }
3795  }
3796  }
3797  else if (HAVE_MORECORE)
3798  {
3799  if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
3800  extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3802  {
3803  /* Make sure end of memory is where we last set it. */
3804  char *old_br = (char *) (CALL_MORECORE (0));
3805  if (old_br == sp->base + sp->size)
3806  {
3807  char *rel_br = (char *) (CALL_MORECORE (-extra));
3808  char *new_br = (char *) (CALL_MORECORE (0));
3809  if (rel_br != CMFAIL && new_br < old_br)
3810  released = old_br - new_br;
3811  }
3812  }
3814  }
3815  }
3816 
3817  if (released != 0)
3818  {
3819  sp->size -= released;
3820  m->footprint -= released;
3821  init_top (m, m->top, m->topsize - released);
3822  check_top_chunk (m, m->top);
3823  }
3824  }
3825 
3826  /* Unmap any unused mmapped segments */
3827  if (HAVE_MMAP && !USE_MALLOC_INSTEAD)
3828  released += release_unused_segments (m);
3829 
3830  /* On failure, disable autotrim to avoid repeated failed future calls */
3831  if (released == 0)
3832  m->trim_check = MAX_SIZE_T;
3833  }
3834 
3835  return (released != 0) ? 1 : 0;
3836 }
3837 
3838 /* ---------------------------- malloc support --------------------------- */
3839 
3840 /* allocate a large request from the best fitting chunk in a treebin */
3841 static void *
3842 tmalloc_large (mstate m, size_t nb)
3843 {
3844  tchunkptr v = 0;
3845  size_t rsize = -nb; /* Unsigned negation */
3846  tchunkptr t;
3847  bindex_t idx;
3848  compute_tree_index (nb, idx);
3849 
3850  if ((t = *treebin_at (m, idx)) != 0)
3851  {
3852  /* Traverse tree for this bin looking for node with size == nb */
3853  size_t sizebits = nb << leftshift_for_tree_index (idx);
3854  tchunkptr rst = 0; /* The deepest untaken right subtree */
3855  for (;;)
3856  {
3857  tchunkptr rt;
3858  size_t trem = chunksize (t) - nb;
3859  if (trem < rsize)
3860  {
3861  v = t;
3862  if ((rsize = trem) == 0)
3863  break;
3864  }
3865  rt = t->child[1];
3866  t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
3867  if (rt != 0 && rt != t)
3868  rst = rt;
3869  if (t == 0)
3870  {
3871  t = rst; /* set t to least subtree holding sizes > nb */
3872  break;
3873  }
3874  sizebits <<= 1;
3875  }
3876  }
3877 
3878  if (t == 0 && v == 0)
3879  { /* set t to root of next non-empty treebin */
3880  binmap_t leftbits = left_bits (idx2bit (idx)) & m->treemap;
3881  if (leftbits != 0)
3882  {
3883  bindex_t i;
3884  binmap_t leastbit = least_bit (leftbits);
3885  compute_bit2idx (leastbit, i);
3886  t = *treebin_at (m, i);
3887  }
3888  }
3889 
3890  while (t != 0)
3891  { /* find smallest of tree or subtree */
3892  size_t trem = chunksize (t) - nb;
3893  if (trem < rsize)
3894  {
3895  rsize = trem;
3896  v = t;
3897  }
3898  t = leftmost_child (t);
3899  }
3900 
3901  /* If dv is a better fit, return 0 so malloc will use it */
3902  if (v != 0 && rsize < (size_t) (m->dvsize - nb))
3903  {
3904  if (RTCHECK (ok_address (m, v)))
3905  { /* split */
3906  mchunkptr r = chunk_plus_offset (v, nb);
3907  assert (chunksize (v) == rsize + nb);
3908  if (RTCHECK (ok_next (v, r)))
3909  {
3910  unlink_large_chunk (m, v);
3911  if (rsize < MIN_CHUNK_SIZE)
3912  set_inuse_and_pinuse (m, v, (rsize + nb));
3913  else
3914  {
3917  insert_chunk (m, r, (bindex_t) rsize);
3918  }
3919  return chunk2mem (v);
3920  }
3921  }
3923  }
3924  return 0;
3925 }
3926 
3927 /* allocate a small request from the best fitting chunk in a treebin */
3928 static void *
3929 tmalloc_small (mstate m, size_t nb)
3930 {
3931  tchunkptr t, v;
3932  size_t rsize;
3933  bindex_t i;
3934  binmap_t leastbit = least_bit (m->treemap);
3935  compute_bit2idx (leastbit, i);
3936 
3937  v = t = *treebin_at (m, i);
3938  rsize = chunksize (t) - nb;
3939 
3940  while ((t = leftmost_child (t)) != 0)
3941  {
3942  size_t trem = chunksize (t) - nb;
3943  if (trem < rsize)
3944  {
3945  rsize = trem;
3946  v = t;
3947  }
3948  }
3949 
3950  if (RTCHECK (ok_address (m, v)))
3951  {
3952  mchunkptr r = chunk_plus_offset (v, nb);
3953  assert (chunksize (v) == rsize + nb);
3954  if (RTCHECK (ok_next (v, r)))
3955  {
3956  unlink_large_chunk (m, v);
3957  if (rsize < MIN_CHUNK_SIZE)
3958  set_inuse_and_pinuse (m, v, (rsize + nb));
3959  else
3960  {
3963  replace_dv (m, r, rsize);
3964  }
3965  return chunk2mem (v);
3966  }
3967  }
3968 
3970  return 0;
3971 }
3972 
3973 /* --------------------------- realloc support --------------------------- */
3974 
3975 static void *
3976 internal_realloc (mstate m, void *oldmem, size_t bytes)
3977 {
3978  if (bytes >= MAX_REQUEST)
3979  {
3981  return 0;
3982  }
3983  if (!PREACTION (m))
3984  {
3985  mchunkptr oldp = mem2chunk (oldmem);
3986  size_t oldsize = chunksize (oldp);
3987  mchunkptr next = chunk_plus_offset (oldp, oldsize);
3988  mchunkptr newp = 0;
3989  void *extra = 0;
3990 
3991  /* Try to either shrink or extend into top. Else malloc-copy-free */
3992 
3993  if (RTCHECK (ok_address (m, oldp) && ok_cinuse (oldp) && ok_next (oldp, next) && ok_pinuse (next)))
3994  {
3995  size_t nb = request2size (bytes);
3996  if (is_mmapped (oldp))
3997  newp = mmap_resize (m, oldp, nb);
3998  else if (oldsize >= nb)
3999  { /* already big enough */
4000  size_t rsize = oldsize - nb;
4001  newp = oldp;
4002  if (rsize >= MIN_CHUNK_SIZE)
4003  {
4004  mchunkptr remainder = chunk_plus_offset (newp, nb);
4005  set_inuse (m, newp, nb);
4006  set_inuse (m, remainder, rsize);
4007  extra = chunk2mem (remainder);
4008  }
4009  }
4010  else if (next == m->top && oldsize + m->topsize > nb)
4011  {
4012  /* Expand into top */
4013  size_t newsize = oldsize + m->topsize;
4014  size_t newtopsize = newsize - nb;
4015  mchunkptr newtop = chunk_plus_offset (oldp, nb);
4016  set_inuse (m, oldp, nb);
4017  newtop->head = newtopsize | PINUSE_BIT;
4018  m->top = newtop;
4019  m->topsize = newtopsize;
4020  newp = oldp;
4021  }
4022  }
4023  else
4024  {
4025  USAGE_ERROR_ACTION (m, oldmem);
4026  POSTACTION (m);
4027  return 0;
4028  }
4029 
4030  POSTACTION (m);
4031 
4032  if (newp != 0)
4033  {
4034  if (extra != 0)
4035  {
4036  internal_free (m, extra);
4037  }
4038  check_inuse_chunk (m, newp);
4039  return chunk2mem (newp);
4040  }
4041  else
4042  {
4043  void *newmem = internal_malloc (m, bytes);
4044  if (newmem != 0)
4045  {
4046  size_t oc = oldsize - overhead_for (oldp);
4047  memcpy (newmem, oldmem, (oc < bytes) ? oc : bytes);
4048  internal_free (m, oldmem);
4049  }
4050  return newmem;
4051  }
4052  }
4053  return 0;
4054 }
4055 
4056 /* --------------------------- memalign support -------------------------- */
4057 
4058 static void *
4059 internal_memalign (mstate m, size_t alignment, size_t bytes)
4060 {
4061  if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
4062  return internal_malloc (m, bytes);
4063  if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
4064  alignment = MIN_CHUNK_SIZE;
4065  if ((alignment & (alignment - SIZE_T_ONE)) != 0)
4066  { /* Ensure a power of 2 */
4067  size_t a = MALLOC_ALIGNMENT << 1;
4068  while (a < alignment)
4069  a <<= 1;
4070  alignment = a;
4071  }
4072 
4073  if (bytes >= MAX_REQUEST - alignment)
4074  {
4075  if (m != 0)
4076  { /* Test isn't needed but avoids compiler warning */
4078  }
4079  }
4080  else
4081  {
4082  size_t nb = request2size (bytes);
4083  size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
4084  char *mem = (char *) internal_malloc (m, req);
4085  if (mem != 0)
4086  {
4087  void *leader = 0;
4088  void *trailer = 0;
4089  mchunkptr p = mem2chunk (mem);
4090 
4091  if (PREACTION (m))
4092  return 0;
4093  if ((((size_t) (mem)) % alignment) != 0)
4094  { /* misaligned */
4095  /*
4096  Find an aligned spot inside chunk. Since we need to give
4097  back leading space in a chunk of at least MIN_CHUNK_SIZE, if
4098  the first calculation places us at a spot with less than
4099  MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
4100  We've allocated enough total room so that this is always
4101  possible.
4102  */
4103  char *br = (char *) mem2chunk ((size_t) (((size_t) (mem + alignment - SIZE_T_ONE)) & -alignment));
4104  char *pos = ((size_t) (br - (char *) (p)) >= MIN_CHUNK_SIZE) ? br : br + alignment;
4105  mchunkptr newp = (mchunkptr) pos;
4106  size_t leadsize = pos - (char *) (p);
4107  size_t newsize = chunksize (p) - leadsize;
4108 
4109  if (is_mmapped (p))
4110  { /* For mmapped chunks, just adjust offset */
4111  newp->prev_foot = p->prev_foot + leadsize;
4112  newp->head = (newsize | CINUSE_BIT);
4113  }
4114  else
4115  { /* Otherwise, give back leader, use the rest */
4116  set_inuse (m, newp, newsize);
4117  set_inuse (m, p, leadsize);
4118  leader = chunk2mem (p);
4119  }
4120  p = newp;
4121  }
4122 
4123  /* Give back spare room at the end */
4124  if (!is_mmapped (p))
4125  {
4126  size_t size = chunksize (p);
4127  if (size > nb + MIN_CHUNK_SIZE)
4128  {
4129  size_t remainder_size = size - nb;
4130  mchunkptr remainder = chunk_plus_offset (p, nb);
4131  set_inuse (m, p, nb);
4132  set_inuse (m, remainder, remainder_size);
4133  trailer = chunk2mem (remainder);
4134  }
4135  }
4136 
4137  assert (chunksize (p) >= nb);
4138  assert ((((size_t) (chunk2mem (p))) % alignment) == 0);
4139  check_inuse_chunk (m, p);
4140  POSTACTION (m);
4141  if (leader != 0)
4142  {
4143  internal_free (m, leader);
4144  }
4145  if (trailer != 0)
4146  {
4147  internal_free (m, trailer);
4148  }
4149  return chunk2mem (p);
4150  }
4151  }
4152  return 0;
4153 }
4154 
4155 /* ------------------------ comalloc/coalloc support --------------------- */
4156 
4157 static void **
4158 ialloc (mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
4159 {
4160  /*
4161  This provides common support for independent_X routines, handling
4162  all of the combinations that can result.
4163 
4164  The opts arg has:
4165  bit 0 set if all elements are same size (using sizes[0])
4166  bit 1 set if elements should be zeroed
4167  */
4168 
4169  size_t element_size; /* chunksize of each element, if all same */
4170  size_t contents_size; /* total size of elements */
4171  size_t array_size; /* request size of pointer array */
4172  void *mem; /* malloced aggregate space */
4173  mchunkptr p; /* corresponding chunk */
4174  size_t remainder_size; /* remaining bytes while splitting */
4175  void **marray; /* either "chunks" or malloced ptr array */
4176  mchunkptr array_chunk; /* chunk for malloced ptr array */
4177  flag_t was_enabled; /* to disable mmap */
4178  size_t size;
4179  size_t i;
4180 
4181  /* compute array length, if needed */
4182  if (chunks != 0)
4183  {
4184  if (n_elements == 0)
4185  return chunks; /* nothing to do */
4186  marray = chunks;
4187  array_size = 0;
4188  }
4189  else
4190  {
4191  /* if empty req, must still return chunk representing empty array */
4192  if (n_elements == 0)
4193  return (void **) internal_malloc (m, 0);
4194  marray = 0;
4195  array_size = request2size (n_elements * (sizeof (void *)));
4196  }
4197 
4198  /* compute total element size */
4199  if (opts & 0x1)
4200  { /* all-same-size */
4201  element_size = request2size (*sizes);
4202  contents_size = n_elements * element_size;
4203  }
4204  else
4205  { /* add up all the sizes */
4206  element_size = 0;
4207  contents_size = 0;
4208  for (i = 0; i != n_elements; ++i)
4209  contents_size += request2size (sizes[i]);
4210  }
4211 
4212  size = contents_size + array_size;
4213 
4214  /*
4215  Allocate the aggregate chunk. First disable direct-mmapping so
4216  malloc won't use it, since we would not be able to later
4217  free/realloc space internal to a segregated mmap region.
4218  */
4219  was_enabled = use_mmap (m);
4220  disable_mmap (m);
4221  mem = internal_malloc (m, size - CHUNK_OVERHEAD);
4222  if (was_enabled)
4223  enable_mmap (m);
4224  if (mem == 0)
4225  return 0;
4226 
4227  if (PREACTION (m))
4228  return 0;
4229  p = mem2chunk (mem);
4230  remainder_size = chunksize (p);
4231 
4232  assert (!is_mmapped (p));
4233 
4234  if (opts & 0x2)
4235  { /* optionally clear the elements */
4236  memset ((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4237  }
4238 
4239  /* If not provided, allocate the pointer array as final part of chunk */
4240  if (marray == 0)
4241  {
4242  size_t array_chunk_size;
4243  array_chunk = chunk_plus_offset (p, contents_size);
4244  array_chunk_size = remainder_size - contents_size;
4245  marray = (void **) (chunk2mem (array_chunk));
4246  set_size_and_pinuse_of_inuse_chunk (m, array_chunk, array_chunk_size);
4247  remainder_size = contents_size;
4248  }
4249 
4250  /* split out elements */
4251  for (i = 0;; ++i)
4252  {
4253  marray[i] = chunk2mem (p);
4254  if (i != n_elements - 1)
4255  {
4256  if (element_size != 0)
4257  size = element_size;
4258  else
4259  size = request2size (sizes[i]);
4260  remainder_size -= size;
4262  p = chunk_plus_offset (p, size);
4263  }
4264  else
4265  { /* the final element absorbs any overallocation slop */
4266  set_size_and_pinuse_of_inuse_chunk (m, p, remainder_size);
4267  break;
4268  }
4269  }
4270 
4271 #if DEBUG
4272  if (marray != chunks)
4273  {
4274  /* final element must have exactly exhausted chunk */
4275  if (element_size != 0)
4276  {
4277  assert (remainder_size == element_size);
4278  }
4279  else
4280  {
4281  assert (remainder_size == request2size (sizes[i]));
4282  }
4283  check_inuse_chunk (m, mem2chunk (marray));
4284  }
4285  for (i = 0; i != n_elements; ++i)
4286  check_inuse_chunk (m, mem2chunk (marray[i]));
4287 
4288 #endif /* DEBUG */
4289 
4290  POSTACTION (m);
4291  return marray;
4292 }
4293 
4294 
4295 /* -------------------------- public routines ---------------------------- */
4296 
4297 #if !ONLY_MSPACES
4298 
4299 void *
4300 dlmalloc (size_t bytes)
4301 {
4302  /*
4303  Basic algorithm:
4304  If a small request (< 256 bytes minus per-chunk overhead):
4305  1. If one exists, use a remainderless chunk in associated smallbin.
4306  (Remainderless means that there are too few excess bytes to
4307  represent as a chunk.)
4308  2. If it is big enough, use the dv chunk, which is normally the
4309  chunk adjacent to the one used for the most recent small request.
4310  3. If one exists, split the smallest available chunk in a bin,
4311  saving remainder in dv.
4312  4. If it is big enough, use the top chunk.
4313  5. If available, get memory from system and use it
4314  Otherwise, for a large request:
4315  1. Find the smallest available binned chunk that fits, and use it
4316  if it is better fitting than dv chunk, splitting if necessary.
4317  2. If better fitting than any binned chunk, use the dv chunk.
4318  3. If it is big enough, use the top chunk.
4319  4. If request size >= mmap threshold, try to directly mmap this chunk.
4320  5. If available, get memory from system and use it
4321 
4322  The ugly goto's here ensure that postaction occurs along all paths.
4323  */
4324 
4325  if (!PREACTION (gm))
4326  {
4327  void *mem;
4328  size_t nb;
4329  if (bytes <= MAX_SMALL_REQUEST)
4330  {
4331  bindex_t idx;
4332  binmap_t smallbits;
4333  nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request (bytes);
4334  idx = small_index (nb);
4335  smallbits = gm->smallmap >> idx;
4336 
4337  if ((smallbits & 0x3U) != 0)
4338  { /* Remainderless fit to a smallbin. */
4339  mchunkptr b, p;
4340  idx += ~smallbits & 1; /* Uses next bin if idx empty */
4341  b = smallbin_at (gm, idx);
4342  p = b->fd;
4343  assert (chunksize (p) == small_index2size (idx));
4344  unlink_first_small_chunk (gm, b, p, idx);
4346  mem = chunk2mem (p);
4347  check_malloced_chunk (gm, mem, nb);
4348  goto postaction;
4349  }
4350 
4351  else if (nb > gm->dvsize)
4352  {
4353  if (smallbits != 0)
4354  { /* Use chunk in next nonempty smallbin */
4355  mchunkptr b, p, r;
4356  size_t rsize;
4357  bindex_t i;
4358  binmap_t leftbits = (smallbits << idx) & left_bits (idx2bit (idx));
4359  binmap_t leastbit = least_bit (leftbits);
4360  compute_bit2idx (leastbit, i);
4361  b = smallbin_at (gm, i);
4362  p = b->fd;
4363  assert (chunksize (p) == small_index2size (i));
4364  unlink_first_small_chunk (gm, b, p, i);
4365  rsize = small_index2size (i) - nb;
4366  /* Fit here cannot be remainderless if 4byte sizes */
4367  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4369  else
4370  {
4372  r = chunk_plus_offset (p, nb);
4374  replace_dv (gm, r, rsize);
4375  }
4376  mem = chunk2mem (p);
4377  check_malloced_chunk (gm, mem, nb);
4378  goto postaction;
4379  }
4380 
4381  else if (gm->treemap != 0 && (mem = tmalloc_small (gm, nb)) != 0)
4382  {
4383  check_malloced_chunk (gm, mem, nb);
4384  goto postaction;
4385  }
4386  }
4387  }
4388  else if (bytes >= MAX_REQUEST)
4389  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4390  else
4391  {
4392  nb = pad_request (bytes);
4393  if (gm->treemap != 0 && (mem = tmalloc_large (gm, nb)) != 0)
4394  {
4395  check_malloced_chunk (gm, mem, nb);
4396  goto postaction;
4397  }
4398  }
4399 
4400  if (nb <= gm->dvsize)
4401  {
4402  size_t rsize = gm->dvsize - nb;
4403  mchunkptr p = gm->dv;
4404  if (rsize >= MIN_CHUNK_SIZE)
4405  { /* split dv */
4406  mchunkptr r = gm->dv = chunk_plus_offset (p, nb);
4407  gm->dvsize = rsize;
4410  }
4411  else
4412  { /* exhaust dv */
4413  size_t dvs = gm->dvsize;
4414  gm->dvsize = 0;
4415  gm->dv = 0;
4416  set_inuse_and_pinuse (gm, p, dvs);
4417  }
4418  mem = chunk2mem (p);
4419  check_malloced_chunk (gm, mem, nb);
4420  goto postaction;
4421  }
4422 
4423  else if (nb < gm->topsize)
4424  { /* Split top */
4425  size_t rsize = gm->topsize -= nb;
4426  mchunkptr p = gm->top;
4427  mchunkptr r = gm->top = chunk_plus_offset (p, nb);
4428  r->head = rsize | PINUSE_BIT;
4430  mem = chunk2mem (p);
4431  check_top_chunk (gm, gm->top);
4432  check_malloced_chunk (gm, mem, nb);
4433  goto postaction;
4434  }
4435 
4436  mem = sys_alloc (gm, nb);
4437 
4438  postaction:
4439  POSTACTION (gm);
4440  return mem;
4441  }
4442 
4443  return 0;
4444 }
4445 
4446 void
4447 dlfree (void *mem)
4448 {
4449  /*
4450  Consolidate freed chunks with preceeding or succeeding bordering
4451  free chunks, if they exist, and then place in a bin. Intermixed
4452  with special cases for top, dv, mmapped chunks, and usage errors.
4453  */
4454 
4455  if (mem != 0)
4456  {
4457  mchunkptr p = mem2chunk (mem);
4458 #if FOOTERS
4459  mstate fm = get_mstate_for (p);
4460  if (!ok_magic (fm))
4461  {
4462  USAGE_ERROR_ACTION (fm, p);
4463  return;
4464  }
4465 #else /* FOOTERS */
4466 #define fm gm
4467 #endif /* FOOTERS */
4468  if (!PREACTION (fm))
4469  {
4470  check_inuse_chunk (fm, p);
4471  if (RTCHECK (ok_address (fm, p) && ok_cinuse (p)))
4472  {
4473  size_t psize = chunksize (p);
4474  mchunkptr next = chunk_plus_offset (p, psize);
4475  if (!pinuse (p))
4476  {
4477  size_t prevsize = p->prev_foot;
4478  if ((prevsize & IS_MMAPPED_BIT) != 0)
4479  {
4480  prevsize &= ~IS_MMAPPED_BIT;
4481  psize += prevsize + MMAP_FOOT_PAD;
4482  if (CALL_MUNMAP ((char *) p - prevsize, psize) == 0)
4483  fm->footprint -= psize;
4484  goto postaction;
4485  }
4486  else
4487  {
4488  mchunkptr prev = chunk_minus_offset (p, prevsize);
4489  psize += prevsize;
4490  p = prev;
4491  if (RTCHECK (ok_address (fm, prev)))
4492  { /* consolidate backward */
4493  if (p != fm->dv)
4494  {
4495  unlink_chunk (fm, p, prevsize);
4496  }
4497  else if ((next->head & INUSE_BITS) == INUSE_BITS)
4498  {
4499  fm->dvsize = psize;
4500  set_free_with_pinuse (p, psize, next);
4501  goto postaction;
4502  }
4503  }
4504  else
4505  goto erroraction;
4506  }
4507  }
4508 
4509  if (RTCHECK (ok_next (p, next) && ok_pinuse (next)))
4510  {
4511  if (!cinuse (next))
4512  { /* consolidate forward */
4513  if (next == fm->top)
4514  {
4515  size_t tsize = fm->topsize += psize;
4516  fm->top = p;
4517  p->head = tsize | PINUSE_BIT;
4518  if (p == fm->dv)
4519  {
4520  fm->dv = 0;
4521  fm->dvsize = 0;
4522  }
4523  if (should_trim (fm, tsize))
4524  sys_trim (fm, 0);
4525  goto postaction;
4526  }
4527  else if (next == fm->dv)
4528  {
4529  size_t dsize = fm->dvsize += psize;
4530  fm->dv = p;
4532  goto postaction;
4533  }
4534  else
4535  {
4536  size_t nsize = chunksize (next);
4537  psize += nsize;
4538  unlink_chunk (fm, next, nsize);
4540  if (p == fm->dv)
4541  {
4542  fm->dvsize = psize;
4543  goto postaction;
4544  }
4545  }
4546  }
4547  else
4548  set_free_with_pinuse (p, psize, next);
4549  insert_chunk (fm, p, psize);
4550  check_free_chunk (fm, p);
4551  goto postaction;
4552  }
4553  }
4554  erroraction:
4555  USAGE_ERROR_ACTION (fm, p);
4556  postaction:
4557  POSTACTION (fm);
4558  }
4559  }
4560 #if !FOOTERS
4561 #undef fm
4562 #endif /* FOOTERS */
4563 }
4564 
4565 void *
4566 dlcalloc (size_t n_elements, size_t elem_size)
4567 {
4568  void *mem;
4569  size_t req = 0;
4570  if (n_elements != 0)
4571  {
4572  req = n_elements * elem_size;
4573  if (((n_elements | elem_size) & ~(size_t) 0xffff) && (req / n_elements != elem_size))
4574  req = MAX_SIZE_T; /* force downstream failure on overflow */
4575  }
4576  mem = dlmalloc (req);
4577  if (mem != 0 && calloc_must_clear (mem2chunk (mem)))
4578  memset (mem, 0, req);
4579  return mem;
4580 }
4581 
4582 void *
4583 dlrealloc (void *oldmem, size_t bytes)
4584 {
4585  if (oldmem == 0)
4586  return dlmalloc (bytes);
4587 #ifdef REALLOC_ZERO_BYTES_FREES
4588  if (bytes == 0)
4589  {
4590  dlfree (oldmem);
4591  return 0;
4592  }
4593 #endif /* REALLOC_ZERO_BYTES_FREES */
4594  else
4595  {
4596 #if ! FOOTERS
4597  mstate m = gm;
4598 #else /* FOOTERS */
4599  mstate m = get_mstate_for (mem2chunk (oldmem));
4600  if (!ok_magic (m))
4601  {
4602  USAGE_ERROR_ACTION (m, oldmem);
4603  return 0;
4604  }
4605 #endif /* FOOTERS */
4606  return internal_realloc (m, oldmem, bytes);
4607  }
4608 }
4609 
4610 void *
4611 dlmemalign (size_t alignment, size_t bytes)
4612 {
4613  return internal_memalign (gm, alignment, bytes);
4614 }
4615 
4616 void **
4617 dlindependent_calloc (size_t n_elements, size_t elem_size, void *chunks[])
4618 {
4619  size_t sz = elem_size; /* serves as 1-element array */
4620  return ialloc (gm, n_elements, &sz, 3, chunks);
4621 }
4622 
4623 void **
4624 dlindependent_comalloc (size_t n_elements, size_t sizes[], void *chunks[])
4625 {
4626  return ialloc (gm, n_elements, sizes, 0, chunks);
4627 }
4628 
4629 void *
4630 dlvalloc (size_t bytes)
4631 {
4632  size_t pagesz;
4633  init_mparams ();
4634  pagesz = mparams.page_size;
4635  return dlmemalign (pagesz, bytes);
4636 }
4637 
4638 void *
4639 dlpvalloc (size_t bytes)
4640 {
4641  size_t pagesz;
4642  init_mparams ();
4643  pagesz = mparams.page_size;
4644  return dlmemalign (pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
4645 }
4646 
4647 int
4648 dlmalloc_trim (size_t pad)
4649 {
4650  int result = 0;
4651  if (!PREACTION (gm))
4652  {
4653  result = sys_trim (gm, pad);
4654  POSTACTION (gm);
4655  }
4656  return result;
4657 }
4658 
4659 size_t
4661 {
4662  return gm->footprint;
4663 }
4664 
4665 size_t
4667 {
4668  return gm->max_footprint;
4669 }
4670 
4671 #if !NO_MALLINFO
4672 struct mallinfo
4674 {
4675  return internal_mallinfo (gm);
4676 }
4677 #endif /* NO_MALLINFO */
4678 
4679 void
4681 {
4683 }
4684 
4685 size_t
4687 {
4688  if (mem != 0)
4689  {
4690  mchunkptr p = mem2chunk (mem);
4691  if (cinuse (p))
4692  return chunksize (p) - overhead_for (p);
4693  }
4694  return 0;
4695 }
4696 
4697 int
4698 dlmallopt (int param_number, int value)
4699 {
4700  return change_mparam (param_number, value);
4701 }
4702 
4703 #endif /* !ONLY_MSPACES */
4704 
4705 /* ----------------------------- user mspaces ---------------------------- */
4706 
4707 #if MSPACES
4708 
4709 static mstate
4710 init_user_mstate (char *tbase, size_t tsize)
4711 {
4712  size_t msize = pad_request (sizeof (struct malloc_state));
4713  mchunkptr mn;
4714  mchunkptr msp = align_as_chunk (tbase);
4715  mstate m = (mstate) (chunk2mem (msp));
4716  memset (m, 0, msize);
4717  INITIAL_LOCK (&m->mutex);
4718  msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
4719  m->seg.base = m->least_addr = tbase;
4720  m->seg.size = m->footprint = m->max_footprint = tsize;
4721  m->magic = mparams.magic;
4723  disable_contiguous (m);
4724  init_bins (m);
4725  mn = next_chunk (mem2chunk (m));
4726  init_top (m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
4727  check_top_chunk (m, m->top);
4728  return m;
4729 }
4730 
4731 mspace
4732 create_mspace (size_t capacity, int locked)
4733 {
4734  mstate m = 0;
4735  size_t msize = pad_request (sizeof (struct malloc_state));
4736  init_mparams (); /* Ensure pagesize etc initialized */
4737 
4738  if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size))
4739  {
4740  size_t rs = ((capacity == 0) ? mparams.granularity : (capacity + TOP_FOOT_SIZE + msize));
4741  size_t tsize = granularity_align (rs);
4742  char *tbase = (char *) (CALL_MMAP (tsize));
4743  if (tbase != CMFAIL)
4744  {
4745  m = init_user_mstate (tbase, tsize);
4746  m->seg.sflags = IS_MMAPPED_BIT;
4747  set_lock (m, locked);
4748  }
4749  }
4750  return (mspace) m;
4751 }
4752 
4753 mspace
4754 create_mspace_with_base (void *base, size_t capacity, int locked)
4755 {
4756  mstate m = 0;
4757  size_t msize = pad_request (sizeof (struct malloc_state));
4758  init_mparams (); /* Ensure pagesize etc initialized */
4759 
4760  if (capacity > msize + TOP_FOOT_SIZE && capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size))
4761  {
4762  m = init_user_mstate ((char *) base, capacity);
4763  m->seg.sflags = EXTERN_BIT;
4764  set_lock (m, locked);
4765  }
4766  return (mspace) m;
4767 }
4768 
4769 size_t
4770 destroy_mspace (mspace msp)
4771 {
4772  size_t freed = 0;
4773  mstate ms = (mstate) msp;
4774  if (ok_magic (ms))
4775  {
4776  msegmentptr sp = &ms->seg;
4777  while (sp != 0)
4778  {
4779  char *base = sp->base;
4780  size_t size = sp->size;
4781  flag_t flag = sp->sflags;
4782  sp = sp->next;
4783  if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && CALL_MUNMAP (base, size) == 0)
4784  freed += size;
4785  }
4786  }
4787  else
4788  {
4789  USAGE_ERROR_ACTION (ms, ms);
4790  }
4791  return freed;
4792 }
4793 
4794 /*
4795  mspace versions of routines are near-clones of the global
4796  versions. This is not so nice but better than the alternatives.
4797 */
4798 
4799 
4800 void *
4801 mspace_malloc (mspace msp, size_t bytes)
4802 {
4803  mstate ms = (mstate) msp;
4804  if (!ok_magic (ms))
4805  {
4806  USAGE_ERROR_ACTION (ms, ms);
4807  return 0;
4808  }
4809  if (!PREACTION (ms))
4810  {
4811  void *mem;
4812  size_t nb;
4813  if (bytes <= MAX_SMALL_REQUEST)
4814  {
4815  bindex_t idx;
4816  binmap_t smallbits;
4817  nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request (bytes);
4818  idx = (bindex_t) small_index (nb);
4819  smallbits = ms->smallmap >> idx;
4820 
4821  if ((smallbits & 0x3U) != 0)
4822  { /* Remainderless fit to a smallbin. */
4823  mchunkptr b, p;
4824  idx += ~smallbits & 1; /* Uses next bin if idx empty */
4825  b = smallbin_at (ms, idx);
4826  p = b->fd;
4827  assert (chunksize (p) == small_index2size (idx));
4828  unlink_first_small_chunk (ms, b, p, idx);
4829  set_inuse_and_pinuse (ms, p, small_index2size (idx));
4830  mem = chunk2mem (p);
4831  check_malloced_chunk (ms, mem, nb);
4832  goto postaction;
4833  }
4834 
4835  else if (nb > ms->dvsize)
4836  {
4837  if (smallbits != 0)
4838  { /* Use chunk in next nonempty smallbin */
4839  mchunkptr b, p, r;
4840  size_t rsize;
4841  bindex_t i;
4842  binmap_t leftbits = (smallbits << idx) & left_bits (idx2bit (idx));
4843  binmap_t leastbit = least_bit (leftbits);
4844  compute_bit2idx (leastbit, i);
4845  b = smallbin_at (ms, i);
4846  p = b->fd;
4847  assert (chunksize (p) == small_index2size (i));
4848  unlink_first_small_chunk (ms, b, p, i);
4849  rsize = small_index2size (i) - nb;
4850  /* Fit here cannot be remainderless if 4byte sizes */
4851  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4853  else
4854  {
4856  r = chunk_plus_offset (p, nb);
4858  replace_dv (ms, r, (bindex_t) rsize);
4859  }
4860  mem = chunk2mem (p);
4861  check_malloced_chunk (ms, mem, nb);
4862  goto postaction;
4863  }
4864 
4865  else if (ms->treemap != 0 && (mem = tmalloc_small (ms, nb)) != 0)
4866  {
4867  check_malloced_chunk (ms, mem, nb);
4868  goto postaction;
4869  }
4870  }
4871  }
4872  else if (bytes >= MAX_REQUEST)
4873  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4874  else
4875  {
4876  nb = pad_request (bytes);
4877  if (ms->treemap != 0 && (mem = tmalloc_large (ms, nb)) != 0)
4878  {
4879  check_malloced_chunk (ms, mem, nb);
4880  goto postaction;
4881  }
4882  }
4883 
4884  if (nb <= ms->dvsize)
4885  {
4886  size_t rsize = ms->dvsize - nb;
4887  mchunkptr p = ms->dv;
4888  if (rsize >= MIN_CHUNK_SIZE)
4889  { /* split dv */
4890  mchunkptr r = ms->dv = chunk_plus_offset (p, nb);
4891  ms->dvsize = rsize;
4894  }
4895  else
4896  { /* exhaust dv */
4897  size_t dvs = ms->dvsize;
4898  ms->dvsize = 0;
4899  ms->dv = 0;
4900  set_inuse_and_pinuse (ms, p, dvs);
4901  }
4902  mem = chunk2mem (p);
4903  check_malloced_chunk (ms, mem, nb);
4904  goto postaction;
4905  }
4906 
4907  else if (nb < ms->topsize)
4908  { /* Split top */
4909  size_t rsize = ms->topsize -= nb;
4910  mchunkptr p = ms->top;
4911  mchunkptr r = ms->top = chunk_plus_offset (p, nb);
4912  r->head = rsize | PINUSE_BIT;
4914  mem = chunk2mem (p);
4915  check_top_chunk (ms, ms->top);
4916  check_malloced_chunk (ms, mem, nb);
4917  goto postaction;
4918  }
4919 
4920  mem = sys_alloc (ms, nb);
4921 
4922  postaction:
4923  POSTACTION (ms);
4924  return mem;
4925  }
4926 
4927  return 0;
4928 }
4929 
4930 void
4931 mspace_free (mspace msp, void *mem)
4932 {
4933  if (mem != 0)
4934  {
4935  mchunkptr p = mem2chunk (mem);
4936 #if FOOTERS
4937  mstate fm = get_mstate_for (p);
4938 #else /* FOOTERS */
4939  mstate fm = (mstate) msp;
4940 #endif /* FOOTERS */
4941  if (!ok_magic (fm))
4942  {
4943  USAGE_ERROR_ACTION (fm, p);
4944  return;
4945  }
4946  if (!PREACTION (fm))
4947  {
4948  check_inuse_chunk (fm, p);
4949  if (RTCHECK (ok_address (fm, p) && ok_cinuse (p)))
4950  {
4951  size_t psize = chunksize (p);
4952  mchunkptr next = chunk_plus_offset (p, psize);
4953  if (!pinuse (p))
4954  {
4955  size_t prevsize = p->prev_foot;
4956  if ((prevsize & IS_MMAPPED_BIT) != 0)
4957  {
4958  prevsize &= ~IS_MMAPPED_BIT;
4959  psize += prevsize + MMAP_FOOT_PAD;
4960 #if defined(ENABLE_SEPARATE_MMAP_EVENT_TRACE)
4961  do
4962  {
4963  void *ptr = (char *) p - prevsize;
4964  MMAP_TRACE_H *h = (MMAP_TRACE_H *) ((char *) next - MMAP_TRACE_H_SIZE);
4965  munmap_is_to_be_called (msp, ptr, h);
4966  }
4967  while (0);
4968 #endif
4969  if (CALL_MUNMAP ((char *) p - prevsize, psize) == 0)
4970  fm->footprint -= psize;
4971  goto postaction;
4972  }
4973  else
4974  {
4975  mchunkptr prev = chunk_minus_offset (p, prevsize);
4976  psize += prevsize;
4977  p = prev;
4978  if (RTCHECK (ok_address (fm, prev)))
4979  { /* consolidate backward */
4980  if (p != fm->dv)
4981  {
4982  unlink_chunk (fm, p, (bindex_t) prevsize);
4983  }
4984  else if ((next->head & INUSE_BITS) == INUSE_BITS)
4985  {
4986  fm->dvsize = psize;
4987  set_free_with_pinuse (p, psize, next);
4988  goto postaction;
4989  }
4990  }
4991  else
4992  goto erroraction;
4993  }
4994  }
4995 
4996  if (RTCHECK (ok_next (p, next) && ok_pinuse (next)))
4997  {
4998  if (!cinuse (next))
4999  { /* consolidate forward */
5000  if (next == fm->top)
5001  {
5002  size_t tsize = fm->topsize += psize;
5003  fm->top = p;
5004  p->head = tsize | PINUSE_BIT;
5005  if (p == fm->dv)
5006  {
5007  fm->dv = 0;
5008  fm->dvsize = 0;
5009  }
5010  if (should_trim (fm, tsize))
5011  sys_trim (fm, 0);
5012  goto postaction;
5013  }
5014  else if (next == fm->dv)
5015  {
5016  size_t dsize = fm->dvsize += psize;
5017  fm->dv = p;
5019  goto postaction;
5020  }
5021  else
5022  {
5023  size_t nsize = chunksize (next);
5024  psize += nsize;
5025  unlink_chunk (fm, next, (bindex_t) nsize);
5027  if (p == fm->dv)
5028  {
5029  fm->dvsize = psize;
5030  goto postaction;
5031  }
5032  }
5033  }
5034  else
5035  set_free_with_pinuse (p, psize, next);
5036  insert_chunk (fm, p, (bindex_t) psize);
5037  check_free_chunk (fm, p);
5038  goto postaction;
5039  }
5040  }
5041  erroraction:
5042  USAGE_ERROR_ACTION (fm, p);
5043  postaction:
5044  POSTACTION (fm);
5045  }
5046  }
5047 }
5048 
5049 void *
5050 mspace_calloc (mspace msp, size_t n_elements, size_t elem_size)
5051 {
5052  void *mem;
5053  size_t req = 0;
5054  mstate ms = (mstate) msp;
5055  if (!ok_magic (ms))
5056  {
5057  USAGE_ERROR_ACTION (ms, ms);
5058  return 0;
5059  }
5060  if (n_elements != 0)
5061  {
5062  req = n_elements * elem_size;
5063  if (((n_elements | elem_size) & ~(size_t) 0xffff) && (req / n_elements != elem_size))
5064  req = MAX_SIZE_T; /* force downstream failure on overflow */
5065  }
5066  mem = internal_malloc (ms, req);
5067  if (mem != 0 && calloc_must_clear (mem2chunk (mem)))
5068  memset (mem, 0, req);
5069  return mem;
5070 }
5071 
5072 void *
5073 mspace_realloc (mspace msp, void *oldmem, size_t bytes)
5074 {
5075  if (oldmem == 0)
5076  return mspace_malloc (msp, bytes);
5077 #ifdef REALLOC_ZERO_BYTES_FREES
5078  if (bytes == 0)
5079  {
5080  mspace_free (msp, oldmem);
5081  return 0;
5082  }
5083 #endif /* REALLOC_ZERO_BYTES_FREES */
5084  else
5085  {
5086 #if FOOTERS
5087  mchunkptr p = mem2chunk (oldmem);
5088  mstate ms = get_mstate_for (p);
5089 #else /* FOOTERS */
5090  mstate ms = (mstate) msp;
5091 #endif /* FOOTERS */
5092  if (!ok_magic (ms))
5093  {
5094  USAGE_ERROR_ACTION (ms, ms);
5095  return 0;
5096  }
5097  return internal_realloc (ms, oldmem, bytes);
5098  }
5099 }
5100 
5101 void *
5102 mspace_memalign (mspace msp, size_t alignment, size_t bytes)
5103 {
5104  mstate ms = (mstate) msp;
5105  if (!ok_magic (ms))
5106  {
5107  USAGE_ERROR_ACTION (ms, ms);
5108  return 0;
5109  }
5110  return internal_memalign (ms, alignment, bytes);
5111 }
5112 
5113 void **
5114 mspace_independent_calloc (mspace msp, size_t n_elements, size_t elem_size, void *chunks[])
5115 {
5116  size_t sz = elem_size; /* serves as 1-element array */
5117  mstate ms = (mstate) msp;
5118  if (!ok_magic (ms))
5119  {
5120  USAGE_ERROR_ACTION (ms, ms);
5121  return 0;
5122  }
5123  return ialloc (ms, n_elements, &sz, 3, chunks);
5124 }
5125 
5126 void **
5127 mspace_independent_comalloc (mspace msp, size_t n_elements, size_t sizes[], void *chunks[])
5128 {
5129  mstate ms = (mstate) msp;
5130  if (!ok_magic (ms))
5131  {
5132  USAGE_ERROR_ACTION (ms, ms);
5133  return 0;
5134  }
5135  return ialloc (ms, n_elements, sizes, 0, chunks);
5136 }
5137 
5138 int
5139 mspace_trim (mspace msp, size_t pad)
5140 {
5141  int result = 0;
5142  mstate ms = (mstate) msp;
5143  if (ok_magic (ms))
5144  {
5145  if (!PREACTION (ms))
5146  {
5147  result = sys_trim (ms, pad);
5148  POSTACTION (ms);
5149  }
5150  }
5151  else
5152  {
5153  USAGE_ERROR_ACTION (ms, ms);
5154  }
5155  return result;
5156 }
5157 
5158 void
5159 mspace_malloc_stats (mspace msp)
5160 {
5161  mstate ms = (mstate) msp;
5162  if (ok_magic (ms))
5163  {
5164  internal_malloc_stats (ms);
5165  }
5166  else
5167  {
5168  USAGE_ERROR_ACTION (ms, ms);
5169  }
5170 }
5171 
5172 size_t
5173 mspace_footprint (mspace msp)
5174 {
5175  size_t result = 0;
5176  mstate ms = (mstate) msp;
5177  if (ok_magic (ms))
5178  {
5179  result = ms->footprint;
5180  }
5181  else
5182  {
5183  USAGE_ERROR_ACTION (ms, ms);
5184  }
5185  return result;
5186 }
5187 
5188 
5189 size_t
5190 mspace_max_footprint (mspace msp)
5191 {
5192  size_t result = 0;
5193  mstate ms = (mstate) msp;
5194  if (ok_magic (ms))
5195  {
5196  result = ms->max_footprint;
5197  }
5198  else
5199  {
5200  USAGE_ERROR_ACTION (ms, ms);
5201  }
5202  return result;
5203 }
5204 
5205 
5206 #if !NO_MALLINFO
5207 struct mallinfo
5208 mspace_mallinfo (mspace msp)
5209 {
5210  mstate ms = (mstate) msp;
5211  if (!ok_magic (ms))
5212  {
5213  USAGE_ERROR_ACTION (ms, ms);
5214  }
5215  return internal_mallinfo (ms);
5216 }
5217 #endif /* NO_MALLINFO */
5218 
5219 int
5220 mspace_mallopt (int param_number, int value)
5221 {
5222  return change_mparam (param_number, value);
5223 }
5224 
5225 #endif /* MSPACES */
5226 
5227 /* -------------------- Alternative MORECORE functions ------------------- */
5228 
5229 /*
5230  Guidelines for creating a custom version of MORECORE:
5231 
5232  * For best performance, MORECORE should allocate in multiples of pagesize.
5233  * MORECORE may allocate more memory than requested. (Or even less,
5234  but this will usually result in a malloc failure.)
5235  * MORECORE must not allocate memory when given argument zero, but
5236  instead return one past the end address of memory from previous
5237  nonzero call.
5238  * For best performance, consecutive calls to MORECORE with positive
5239  arguments should return increasing addresses, indicating that
5240  space has been contiguously extended.
5241  * Even though consecutive calls to MORECORE need not return contiguous
5242  addresses, it must be OK for malloc'ed chunks to span multiple
5243  regions in those cases where they do happen to be contiguous.
5244  * MORECORE need not handle negative arguments -- it may instead
5245  just return MFAIL when given negative arguments.
5246  Negative arguments are always multiples of pagesize. MORECORE
5247  must not misinterpret negative args as large positive unsigned
5248  args. You can suppress all such calls from even occurring by defining
5249  MORECORE_CANNOT_TRIM,
5250 
5251  As an example alternative MORECORE, here is a custom allocator
5252  kindly contributed for pre-OSX macOS. It uses virtually but not
5253  necessarily physically contiguous non-paged memory (locked in,
5254  present and won't get swapped out). You can use it by uncommenting
5255  this section, adding some #includes, and setting up the appropriate
5256  defines above:
5257 
5258  #define MORECORE osMoreCore
5259 
5260  There is also a shutdown routine that should somehow be called for
5261  cleanup upon program exit.
5262 
5263  #define MAX_POOL_ENTRIES 100
5264  #define MINIMUM_MORECORE_SIZE (64 * 1024U)
5265  static int next_os_pool;
5266  void *our_os_pools[MAX_POOL_ENTRIES];
5267 
5268  void *osMoreCore(int size)
5269  {
5270  void *ptr = 0;
5271  static void *sbrk_top = 0;
5272 
5273  if (size > 0)
5274  {
5275  if (size < MINIMUM_MORECORE_SIZE)
5276  size = MINIMUM_MORECORE_SIZE;
5277  if (CurrentExecutionLevel() == kTaskLevel)
5278  ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5279  if (ptr == 0)
5280  {
5281  return (void *) MFAIL;
5282  }
5283  // save ptrs so they can be freed during cleanup
5284  our_os_pools[next_os_pool] = ptr;
5285  next_os_pool++;
5286  ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5287  sbrk_top = (char *) ptr + size;
5288  return ptr;
5289  }
5290  else if (size < 0)
5291  {
5292  // we don't currently support shrink behavior
5293  return (void *) MFAIL;
5294  }
5295  else
5296  {
5297  return sbrk_top;
5298  }
5299  }
5300 
5301  // cleanup any allocated memory pools
5302  // called as last thing before shutting down driver
5303 
5304  void osCleanupMem(void)
5305  {
5306  void **ptr;
5307 
5308  for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5309  if (*ptr)
5310  {
5311  PoolDeallocate(*ptr);
5312  *ptr = 0;
5313  }
5314  }
5315 
5316 */
5317 
5318 
5319 /* -----------------------------------------------------------------------
5320 History:
5321  V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
5322  * Add max_footprint functions
5323  * Ensure all appropriate literals are size_t
5324  * Fix conditional compilation problem for some #define settings
5325  * Avoid concatenating segments with the one provided
5326  in create_mspace_with_base
5327  * Rename some variables to avoid compiler shadowing warnings
5328  * Use explicit lock initialization.
5329  * Better handling of sbrk interference.
5330  * Simplify and fix segment insertion, trimming and mspace_destroy
5331  * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
5332  * Thanks especially to Dennis Flanagan for help on these.
5333 
5334  V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
5335  * Fix memalign brace error.
5336 
5337  V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
5338  * Fix improper #endif nesting in C++
5339  * Add explicit casts needed for C++
5340 
5341  V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
5342  * Use trees for large bins
5343  * Support mspaces
5344  * Use segments to unify sbrk-based and mmap-based system allocation,
5345  removing need for emulation on most platforms without sbrk.
5346  * Default safety checks
5347  * Optional footer checks. Thanks to William Robertson for the idea.
5348  * Internal code refactoring
5349  * Incorporate suggestions and platform-specific changes.
5350  Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
5351  Aaron Bachmann, Emery Berger, and others.
5352  * Speed up non-fastbin processing enough to remove fastbins.
5353  * Remove useless cfree() to avoid conflicts with other apps.
5354  * Remove internal memcpy, memset. Compilers handle builtins better.
5355  * Remove some options that no one ever used and rename others.
5356 
5357  V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
5358  * Fix malloc_state bitmap array misdeclaration
5359 
5360  V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
5361  * Allow tuning of FIRST_SORTED_BIN_SIZE
5362  * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
5363  * Better detection and support for non-contiguousness of MORECORE.
5364  Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5365  * Bypass most of malloc if no frees. Thanks To Emery Berger.
5366  * Fix freeing of old top non-contiguous chunk im sysmalloc.
5367  * Raised default trim and map thresholds to 256K.
5368  * Fix mmap-related #defines. Thanks to Lubos Lunak.
5369  * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5370  * Branch-free bin calculation
5371  * Default trim and mmap thresholds now 256K.
5372 
5373  V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5374  * Introduce independent_comalloc and independent_calloc.
5375  Thanks to Michael Pachos for motivation and help.
5376  * Make optional .h file available
5377  * Allow > 2GB requests on 32bit systems.
5378  * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5379  Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5380  and Anonymous.
5381  * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5382  helping test this.)
5383  * memalign: check alignment arg
5384  * realloc: don't try to shift chunks backwards, since this
5385  leads to more fragmentation in some programs and doesn't
5386  seem to help in any others.
5387  * Collect all cases in malloc requiring system memory into sysmalloc
5388  * Use mmap as backup to sbrk
5389  * Place all internal state in malloc_state
5390  * Introduce fastbins (although similar to 2.5.1)
5391  * Many minor tunings and cosmetic improvements
5392  * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5393  * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5394  Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5395  * Include errno.h to support default failure action.
5396 
5397  V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5398  * return null for negative arguments
5399  * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5400  * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5401  (e.g. WIN32 platforms)
5402  * Cleanup header file inclusion for WIN32 platforms
5403  * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5404  * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5405  memory allocation routines
5406  * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5407  * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5408  usage of 'assert' in non-WIN32 code
5409  * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5410  avoid infinite loop
5411  * Always call 'fREe()' rather than 'free()'
5412 
5413  V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5414  * Fixed ordering problem with boundary-stamping
5415 
5416  V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5417  * Added pvalloc, as recommended by H.J. Liu
5418  * Added 64bit pointer support mainly from Wolfram Gloger
5419  * Added anonymously donated WIN32 sbrk emulation
5420  * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5421  * malloc_extend_top: fix mask error that caused wastage after
5422  foreign sbrks
5423  * Add linux mremap support code from HJ Liu
5424 
5425  V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5426  * Integrated most documentation with the code.
5427  * Add support for mmap, with help from
5428  Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5429  * Use last_remainder in more cases.
5430  * Pack bins using idea from colin@nyx10.cs.du.edu
5431  * Use ordered bins instead of best-fit threshhold
5432  * Eliminate block-local decls to simplify tracing and debugging.
5433  * Support another case of realloc via move into top
5434  * Fix error occuring when initial sbrk_base not word-aligned.
5435  * Rely on page size for units instead of SBRK_UNIT to
5436  avoid surprises about sbrk alignment conventions.
5437  * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5438  (raymond@es.ele.tue.nl) for the suggestion.
5439  * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5440  * More precautions for cases where other routines call sbrk,
5441  courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5442  * Added macros etc., allowing use in linux libc from
5443  H.J. Lu (hjl@gnu.ai.mit.edu)
5444  * Inverted this history list
5445 
5446  V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5447  * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5448  * Removed all preallocation code since under current scheme
5449  the work required to undo bad preallocations exceeds
5450  the work saved in good cases for most test programs.
5451  * No longer use return list or unconsolidated bins since
5452  no scheme using them consistently outperforms those that don't
5453  given above changes.
5454  * Use best fit for very large chunks to prevent some worst-cases.
5455  * Added some support for debugging
5456 
5457  V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5458  * Removed footers when chunks are in use. Thanks to
5459  Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5460 
5461  V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5462  * Added malloc_trim, with help from Wolfram Gloger
5463  (wmglo@Dent.MED.Uni-Muenchen.DE).
5464 
5465  V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5466 
5467  V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5468  * realloc: try to expand in both directions
5469  * malloc: swap order of clean-bin strategy;
5470  * realloc: only conditionally expand backwards
5471  * Try not to scavenge used bins
5472  * Use bin counts as a guide to preallocation
5473  * Occasionally bin return list chunks in first scan
5474  * Add a few optimizations from colin@nyx10.cs.du.edu
5475 
5476  V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5477  * faster bin computation & slightly different binning
5478  * merged all consolidations to one part of malloc proper
5479  (eliminating old malloc_find_space & malloc_clean_bin)
5480  * Scan 2 returns chunks (not just 1)
5481  * Propagate failure in realloc if malloc returns 0
5482  * Add stuff to allow compilation on non-ANSI compilers
5483  from kpv@research.att.com
5484 
5485  V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5486  * removed potential for odd address access in prev_chunk
5487  * removed dependency on getpagesize.h
5488  * misc cosmetics and a bit more internal documentation
5489  * anticosmetics: mangled names in macros to evade debugger strangeness
5490  * tested on sparc, hp-700, dec-mips, rs6000
5491  with gcc & native cc (hp, dec only) allowing
5492  Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5493 
5494  Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5495  * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5496  structure of old version, but most details differ.)
5497 
5498 */
5499 /*===========================================================================*/
5500 /*===========================================================================*/
5501 /*===========================================================================*/
#define MIN_CHUNK_SIZE
#define treemap_is_marked(M, i)
static struct malloc_params mparams
#define MAX_SMALL_REQUEST
#define check_inuse_chunk(M, P)
#define mark_inuse_foot(M, p, s)
#define check_malloced_chunk(M, P, N)
#define idx2bit(i)
#define EXTERN_BIT
#define dlmalloc_usable_size
Definition: malloc_2_8_3.c:691
flag_t default_mflags
#define dlcalloc
Definition: malloc_2_8_3.c:680
#define dlmalloc_stats
Definition: malloc_2_8_3.c:690
static void * sys_alloc(mstate m, size_t nb)
#define set_inuse(M, p, s)
#define CORRUPTION_ERROR_ACTION(m)
mchunkptr top
#define dlmallinfo
Definition: malloc_2_8_3.c:687
#define chunk2mem(p)
MALLINFO_FIELD_TYPE arena
Definition: malloc_2_8_3.c:655
#define granularity_align(S)
#define dlfree
Definition: malloc_2_8_3.c:681
MALLINFO_FIELD_TYPE hblks
Definition: malloc_2_8_3.c:658
struct malloc_chunk * bk
static msegmentptr segment_holding(mstate m, char *addr)
static void * internal_memalign(mstate m, size_t alignment, size_t bytes)
#define overhead_for(p)
#define USAGE_ERROR_ACTION(m, p)
#define MORECORE_CONTIGUOUS
Definition: malloc_2_8_3.c:574
#define disable_mmap(M)
#define is_extern_segment(S)
static API_MUTEX mutex
Definition: api_util.c:72
struct malloc_tree_chunk * parent
#define left_bits(x)
#define HAVE_MMAP
Definition: malloc_2_8_3.c:545
#define use_mmap(M)
#define PREACTION(M)
#define small_index2size(i)
#define chunk_minus_offset(p, s)
#define ABORT
Definition: malloc_2_8_3.c:530
#define unlink_first_small_chunk(M, B, P, I)
#define is_page_aligned(S)
#define chunksize(p)
static void * tmalloc_small(mstate m, size_t nb)
tbinptr treebins[NTREEBINS]
size_t trim_check
#define pad_request(req)
#define dlmallopt
Definition: malloc_2_8_3.c:688
#define USE_MMAP_BIT
MALLINFO_FIELD_TYPE ordblks
Definition: malloc_2_8_3.c:656
#define is_small(s)
static int change_mparam(int param_number, int value)
#define NTREEBINS
#define CINUSE_BIT
#define ACQUIRE_MAGIC_INIT_LOCK()
unsigned int bindex_t
size_t max_footprint
#define set_size_and_pinuse_of_free_chunk(p, s)
#define dlpvalloc
Definition: malloc_2_8_3.c:686
struct malloc_tree_chunk * tchunkptr
struct malloc_chunk * mchunkptr
#define internal_malloc(m, b)
msegment seg
struct malloc_tree_chunk * fd
unsigned int flag_t
#define HALF_MAX_SIZE_T
static void * tmalloc_large(mstate m, size_t nb)
#define POSTACTION(M)
#define use_noncontiguous(M)
static void ** ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, void *chunks[])
#define IS_MMAPPED_BIT
#define dlvalloc
Definition: malloc_2_8_3.c:685
#define dlmemalign
Definition: malloc_2_8_3.c:683
#define SIZE_T_SIZE
struct malloc_state * mstate
static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
#define CALL_MREMAP(addr, osz, nsz, mv)
#define NSMALLBINS
#define CALL_MMAP(s)
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb)
#define HAVE_MORECORE
Definition: malloc_2_8_3.c:564
struct malloc_segment * next
#define next_pinuse(p)
static void mmap_called(void *m, void *ptr, MMAP_TRACE_H *h)
Definition: lea_heap.c:182
#define CHUNK_ALIGN_MASK
#define dlmalloc_max_footprint
Definition: malloc_2_8_3.c:693
static void * mmap_alloc(mstate m, size_t nb)
#define ok_cinuse(p)
#define assert(x)
#define RELEASE_MORECORE_LOCK()
#define small_index(s)
#define dlrealloc
Definition: malloc_2_8_3.c:684
mchunkptr smallbins[(NSMALLBINS+1)*2]
size_t prev_foot
#define INITIAL_LOCK(l)
static void * prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
#define minsize_for_tree_index(i)
#define RTCHECK(e)
size_t trim_threshold
#define dlindependent_calloc
Definition: malloc_2_8_3.c:694
#define check_free_chunk(M, P)
#define FOUR_SIZE_T_SIZES
#define USE_NONCONTIGUOUS_BIT
#define least_bit(x)
#define dlindependent_comalloc
Definition: malloc_2_8_3.c:695
#define segment_holds(S, A)
#define USE_MALLOC_INSTEAD
#define MAX_SIZE_T
Definition: malloc_2_8_3.c:511
#define align_as_chunk(A)
binmap_t smallmap
#define gm
#define malloc_getpagesize
#define SIZE_T_BITSIZE
#define is_aligned(A)
char * least_addr
#define chunk_plus_offset(p, s)
static struct mallinfo internal_mallinfo(mstate m)
static int has_segment_link(mstate m, msegmentptr ss)
#define ACQUIRE_MORECORE_LOCK()
struct malloc_segment * msegmentptr
MALLINFO_FIELD_TYPE fordblks
Definition: malloc_2_8_3.c:663
#define MFAIL
#define INUSE_BITS
#define SIZE_T_ONE
#define MMAP_FOOT_PAD
struct malloc_chunk * fd
#define compute_tree_index(S, I)
#define CALL_MUNMAP(a, s)
#define leftshift_for_tree_index(i)
static int init_mparams(void)
#define treebin_at(M, i)
#define M_MMAP_THRESHOLD
Definition: malloc_2_8_3.c:620
#define PINUSE_BIT
#define cinuse(p)
#define page_align(S)
#define compute_bit2idx(X, I)
static void * internal_realloc(mstate m, void *oldmem, size_t bytes)
binmap_t treemap
#define ok_address(M, a)
struct malloc_tree_chunk * tbinptr
#define dlmalloc_trim
Definition: malloc_2_8_3.c:689
#define DIRECT_MMAP(s)
MALLINFO_FIELD_TYPE fsmblks
Definition: malloc_2_8_3.c:661
#define unlink_chunk(M, P, S)
#define internal_free(m, mem)
#define dlmalloc
Definition: malloc_2_8_3.c:682
static void internal_malloc_stats(mstate m)
MALLINFO_FIELD_TYPE keepcost
Definition: malloc_2_8_3.c:664
unsigned int binmap_t
#define ok_magic(M)
#define TOP_FOOT_SIZE
struct malloc_tree_chunk * bk
#define request2size(req)
#define DEFAULT_GRANULARITY
Definition: malloc_2_8_3.c:579
size_t granularity
#define DEFAULT_TRIM_THRESHOLD
Definition: malloc_2_8_3.c:586
MALLINFO_FIELD_TYPE hblkhd
Definition: malloc_2_8_3.c:659
#define align_offset(A)
static void init_top(mstate m, mchunkptr p, size_t psize)
#define ok_pinuse(p)
#define CMFAIL
#define CHUNK_OVERHEAD
static void munmap_is_to_be_called(void *m, void *ptr, MMAP_TRACE_H *h)
Definition: lea_heap.c:198
#define check_top_chunk(M, P)
struct malloc_tree_chunk * child[2]
#define insert_large_chunk(M, X, S)
#define mem2chunk(mem)
#define CALL_MORECORE(S)
#define MALLOC_FAILURE_ACTION
Definition: malloc_2_8_3.c:558
#define MMAP_TRACE_H_SIZE
Definition: lea_heap.c:87
mchunkptr dv
size_t footprint
#define is_initialized(M)
#define smallbin_at(M, i)
#define enable_mmap(M)
#define is_global(M)
#define unlink_large_chunk(M, X)
#define insert_chunk(M, P, S)
int i
Definition: dynamic_load.c:954
static size_t release_unused_segments(mstate m)
#define dlmalloc_footprint
Definition: malloc_2_8_3.c:692
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)
#define set_inuse_and_pinuse(M, p, s)
static struct malloc_state _gm_
#define SIX_SIZE_T_SIZES
#define disable_contiguous(M)
#define pinuse(p)
#define M_GRANULARITY
Definition: malloc_2_8_3.c:619
#define M_TRIM_THRESHOLD
Definition: malloc_2_8_3.c:618
#define prev_chunk(p)
#define smallmap_is_marked(M, i)
#define DEFAULT_MMAP_THRESHOLD
Definition: malloc_2_8_3.c:593
static void init_bins(mstate m)
#define is_mmapped(p)
#define should_trim(M, s)
#define replace_dv(M, P, S)
#define FENCEPOST_HEAD
#define USE_LOCK_BIT
#define MCHUNK_SIZE
#define check_mmapped_chunk(M, P)
#define MALLOC_ALIGNMENT
Definition: malloc_2_8_3.c:524
#define ok_next(p, n)
#define calloc_must_clear(p)
#define fm
#define is_mmapped_segment(S)
#define leftmost_child(t)
struct malloc_chunk * sbinptr
#define set_lock(M, L)
static int sys_trim(mstate m, size_t pad)
#define next_chunk(p)
#define set_free_with_pinuse(p, s, n)
#define RELEASE_MAGIC_INIT_LOCK()
const char ** p
Definition: dynamic_load.c:945
#define MAX_REQUEST
#define check_malloc_state(M)
#define MIN_REQUEST
#define MALLINFO_FIELD_TYPE
Definition: malloc_2_8_3.c:608
MALLINFO_FIELD_TYPE uordblks
Definition: malloc_2_8_3.c:662
static int dev_zero_fd
#define MIN_LARGE_SIZE
MALLINFO_FIELD_TYPE smblks
Definition: malloc_2_8_3.c:657
MALLINFO_FIELD_TYPE usmblks
Definition: malloc_2_8_3.c:660
size_t mmap_threshold