casacore
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
malloc.h
Go to the documentation of this file.
1 //# malloc.h: malloc functions from Doug Lea
2 //# Copyright (C) 1996,1999,2001
3 //# Associated Universities, Inc. Washington DC, USA.
4 //#
5 //# This library is free software; you can redistribute it and/or modify it
6 //# under the terms of the GNU Library General Public License as published by
7 //# the Free Software Foundation; either version 2 of the License, or (at your
8 //# option) any later version.
9 //#
10 //# This library is distributed in the hope that it will be useful, but WITHOUT
11 //# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 //# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
13 //# License for more details.
14 //#
15 //# You should have received a copy of the GNU Library General Public License
16 //# along with this library; if not, write to the Free Software Foundation,
17 //# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
18 //#
19 //# Correspondence concerning AIPS++ should be addressed as follows:
20 //# Internet email: aips2-request@nrao.edu.
21 //# Postal address: AIPS++ Project Office
22 //# National Radio Astronomy Observatory
23 //# 520 Edgemont Road
24 //# Charlottesville, VA 22903-2475 USA
25 //#
26 //# $Id$
27 
28 #if !defined(AIPS_NO_LEA_MALLOC)
29 
30 #ifndef CASA_MALLOC_H
31 #define CASA_MALLOC_H
32 
33 
34 /*
35  A version of malloc/free/realloc written by Doug Lea and released to the
36  public domain. Send questions/comments/complaints/performance data
37  to dl@cs.oswego.edu
38 
39 * VERSION 2.6.5 Wed Jun 17 15:55:16 1998 Doug Lea (dl at gee)
40 */
41 
42 /* The only changes from the distribution are:
43  1. Added Casacore copyright notice and guard.
44  2. Compile to nothing for linux since we already get GNU malloc there.
45  3. If AIPS_DEBUG is set compile this malloc with DEBUG on.
46 */
47 
48 #if defined(AIPS_LINUX)
49 /* IS linux. Include malloc.h so we only have to include casa/OS/malloc.h
50  without an ifdef on OS.
51 */
52 #include <malloc.h>
53 #else
54 /* NOT linux */
55 
56 #if defined(AIPS_DEBUG)
57 /* Hopefully not too expensive. If so we can turn it off. */
58 #define DEBUG 1
59 #endif
60 
61 /*
62  Default header file for malloc-2.8.x, written by Doug Lea
63  and released to the public domain, as explained at
64  http://creativecommons.org/licenses/publicdomain.
65 
66  last update: Wed May 27 14:25:17 2009 Doug Lea (dl at gee)
67 
68  This header is for ANSI C/C++ only. You can set any of
69  the following #defines before including:
70 
71  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
72  was also compiled with this option, so all routines
73  have names starting with "dl".
74 
75  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
76  file will be #included AFTER <malloc.h>. This is needed only if
77  your system defines a struct mallinfo that is incompatible with the
78  standard one declared here. Otherwise, you can include this file
 79  INSTEAD of your system <malloc.h>. At least on ANSI, all
80  declarations should be compatible with system versions
81 
82  * If MSPACES is defined, declarations for mspace versions are included.
83 */
84 
85 #ifndef MALLOC_280_H
86 #define MALLOC_280_H
87 
88 #ifdef __cplusplus
89 extern "C" {
90 #endif
91 
92 #include <stddef.h> /* for size_t */
93 
94 #ifndef ONLY_MSPACES
95 #define ONLY_MSPACES 0 /* define to a value */
96 #endif /* ONLY_MSPACES */
97 #ifndef NO_MALLINFO
98 #define NO_MALLINFO 0
99 #endif /* NO_MALLINFO */
100 
101 
102 #if !ONLY_MSPACES
103 
104 #ifndef USE_DL_PREFIX
105 #define dlcalloc calloc
106 #define dlfree free
107 #define dlmalloc malloc
108 #define dlmemalign memalign
109 #define dlrealloc realloc
110 #define dlvalloc valloc
111 #define dlpvalloc pvalloc
112 #define dlmallinfo mallinfo
113 #define dlmallopt mallopt
114 #define dlmalloc_trim malloc_trim
115 #define dlmalloc_stats malloc_stats
116 #define dlmalloc_usable_size malloc_usable_size
117 #define dlmalloc_footprint malloc_footprint
118 #define dlindependent_calloc independent_calloc
119 #define dlindependent_comalloc independent_comalloc
120 #endif /* USE_DL_PREFIX */
121 #if !NO_MALLINFO
122 #ifndef HAVE_USR_INCLUDE_MALLOC_H
123 #ifndef _MALLOC_H
124 #ifndef MALLINFO_FIELD_TYPE
125 #define MALLINFO_FIELD_TYPE size_t
126 #endif /* MALLINFO_FIELD_TYPE */
127 #ifndef STRUCT_MALLINFO_DECLARED
128 #define STRUCT_MALLINFO_DECLARED 1
/* Summary statistics returned by mallinfo(). Fields use
   MALLINFO_FIELD_TYPE (size_t by default, see above) instead of the
   traditional int so large totals do not wrap on 64-bit systems.
   NOTE(review): the per-field comments here and the mallinfo() prose
   further below disagree on hblks ("always 0" vs "current number of
   mmapped regions") -- this discrepancy is inherited from upstream
   dlmalloc; confirm against malloc.c before relying on either. */
129 struct mallinfo {
130  MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
131  MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
132  MALLINFO_FIELD_TYPE smblks; /* always 0 */
133  MALLINFO_FIELD_TYPE hblks; /* always 0 */
134  MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
135  MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
136  MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
137  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
138  MALLINFO_FIELD_TYPE fordblks; /* total free space */
139  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
140 };
141 #endif /* STRUCT_MALLINFO_DECLARED */
142 #endif /* _MALLOC_H */
143 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
144 #endif /* !NO_MALLINFO */
145 
146 /*
147  malloc(size_t n)
148  Returns a pointer to a newly allocated chunk of at least n bytes, or
149  null if no space is available, in which case errno is set to ENOMEM
150  on ANSI C systems.
151 
152  If n is zero, malloc returns a minimum-sized chunk. (The minimum
153  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
154  systems.) Note that size_t is an unsigned type, so calls with
155  arguments that would be negative if signed are interpreted as
156  requests for huge amounts of space, which will often fail. The
157  maximum supported value of n differs across systems, but is in all
158  cases less than the maximum representable value of a size_t.
159 */
160 void* dlmalloc(size_t);
161 
162 /*
163  free(void* p)
164  Releases the chunk of memory pointed to by p, that had been previously
165  allocated using malloc or a related routine such as realloc.
166  It has no effect if p is null. If p was not malloced or already
167  freed, free(p) will by default cuase the current program to abort.
168 */
169 void dlfree(void*);
170 
171 /*
172  calloc(size_t n_elements, size_t element_size);
173  Returns a pointer to n_elements * element_size bytes, with all locations
174  set to zero.
175 */
176 void* dlcalloc(size_t, size_t);
177 
178 /*
179  realloc(void* p, size_t n)
180  Returns a pointer to a chunk of size n that contains the same data
181  as does chunk p up to the minimum of (n, p's size) bytes, or null
182  if no space is available.
183 
184  The returned pointer may or may not be the same as p. The algorithm
185  prefers extending p in most cases when possible, otherwise it
186  employs the equivalent of a malloc-copy-free sequence.
187 
188  If p is null, realloc is equivalent to malloc.
189 
190  If space is not available, realloc returns null, errno is set (if on
191  ANSI) and p is NOT freed.
192 
193  if n is for fewer bytes than already held by p, the newly unused
194  space is lopped off and freed if possible. realloc with a size
195  argument of zero (re)allocates a minimum-sized chunk.
196 
197  The old unix realloc convention of allowing the last-free'd chunk
198  to be used as an argument to realloc is not supported.
199 */
200 
201 void* dlrealloc(void*, size_t);
202 
203 /*
204  memalign(size_t alignment, size_t n);
205  Returns a pointer to a newly allocated chunk of n bytes, aligned
206  in accord with the alignment argument.
207 
208  The alignment argument should be a power of two. If the argument is
209  not a power of two, the nearest greater power is used.
210  8-byte alignment is guaranteed by normal malloc calls, so don't
211  bother calling memalign with an argument of 8 or less.
212 
213  Overreliance on memalign is a sure way to fragment space.
214 */
215 void* dlmemalign(size_t, size_t);
216 
217 /*
218  valloc(size_t n);
219  Equivalent to memalign(pagesize, n), where pagesize is the page
220  size of the system. If the pagesize is unknown, 4096 is used.
221 */
222 void* dlvalloc(size_t);
223 
224 /*
225  mallopt(int parameter_number, int parameter_value)
 226  Sets tunable parameters. The format is to provide a
227  (parameter-number, parameter-value) pair. mallopt then sets the
228  corresponding parameter to the argument value if it can (i.e., so
229  long as the value is meaningful), and returns 1 if successful else
230  0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
 231  normally defined in malloc.h. None of these are used in this malloc,
232  so setting them has no effect. But this malloc also supports other
233  options in mallopt:
234 
235  Symbol param # default allowed param values
236  M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
237  M_GRANULARITY -2 page size any power of 2 >= page size
238  M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
239 */
240 int dlmallopt(int, int);
241 
242 #define M_TRIM_THRESHOLD (-1)
243 #define M_GRANULARITY (-2)
244 #define M_MMAP_THRESHOLD (-3)
245 
246 
247 /*
248  malloc_footprint();
249  Returns the number of bytes obtained from the system. The total
250  number of bytes allocated by malloc, realloc etc., is less than this
251  value. Unlike mallinfo, this function returns only a precomputed
252  result, so can be called frequently to monitor memory consumption.
253  Even if locks are otherwise defined, this function does not use them,
254  so results might not be up to date.
255 */
256 size_t dlmalloc_footprint();
257 
258 #if !NO_MALLINFO
259 /*
260  mallinfo()
261  Returns (by copy) a struct containing various summary statistics:
262 
263  arena: current total non-mmapped bytes allocated from system
264  ordblks: the number of free chunks
265  smblks: always zero.
266  hblks: current number of mmapped regions
267  hblkhd: total bytes held in mmapped regions
268  usmblks: the maximum total allocated space. This will be greater
269  than current total if trimming has occurred.
270  fsmblks: always zero
271  uordblks: current total allocated space (normal or mmapped)
272  fordblks: total free space
273  keepcost: the maximum number of bytes that could ideally be released
274  back to system via malloc_trim. ("ideally" means that
275  it ignores page restrictions etc.)
276 
277  Because these fields are ints, but internal bookkeeping may
278  be kept as longs, the reported values may wrap around zero and
279  thus be inaccurate.
280 */
281 
282 struct mallinfo dlmallinfo(void);
283 #endif /* NO_MALLINFO */
284 
285 /*
286  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
287 
288  independent_calloc is similar to calloc, but instead of returning a
289  single cleared space, it returns an array of pointers to n_elements
290  independent elements that can hold contents of size elem_size, each
291  of which starts out cleared, and can be independently freed,
292  realloc'ed etc. The elements are guaranteed to be adjacently
293  allocated (this is not guaranteed to occur with multiple callocs or
294  mallocs), which may also improve cache locality in some
295  applications.
296 
297  The "chunks" argument is optional (i.e., may be null, which is
298  probably the most typical usage). If it is null, the returned array
299  is itself dynamically allocated and should also be freed when it is
300  no longer needed. Otherwise, the chunks array must be of at least
301  n_elements in length. It is filled in with the pointers to the
302  chunks.
303 
304  In either case, independent_calloc returns this pointer array, or
305  null if the allocation failed. If n_elements is zero and "chunks"
306  is null, it returns a chunk representing an array with zero elements
307  (which should be freed if not wanted).
308 
309  Each element must be individually freed when it is no longer
310  needed. If you'd like to instead be able to free all at once, you
311  should instead use regular calloc and assign pointers into this
312  space to represent elements. (In this case though, you cannot
313  independently free elements.)
314 
315  independent_calloc simplifies and speeds up implementations of many
316  kinds of pools. It may also be useful when constructing large data
317  structures that initially have a fixed number of fixed-sized nodes,
318  but the number is not known at compile time, and some of the nodes
319  may later need to be freed. For example:
320 
321  struct Node { int item; struct Node* next; };
322 
323  struct Node* build_list() {
324  struct Node** pool;
325  int n = read_number_of_nodes_needed();
326  if (n <= 0) return 0;
 327  pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
328  if (pool == 0) die();
329  // organize into a linked list...
330  struct Node* first = pool[0];
 331  for (int i = 0; i < n-1; ++i)
332  pool[i]->next = pool[i+1];
333  free(pool); // Can now free the array (or not, if it is needed later)
334  return first;
335  }
336 */
337 void** dlindependent_calloc(size_t, size_t, void**);
338 
339 /*
340  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
341 
342  independent_comalloc allocates, all at once, a set of n_elements
343  chunks with sizes indicated in the "sizes" array. It returns
344  an array of pointers to these elements, each of which can be
345  independently freed, realloc'ed etc. The elements are guaranteed to
346  be adjacently allocated (this is not guaranteed to occur with
347  multiple callocs or mallocs), which may also improve cache locality
348  in some applications.
349 
350  The "chunks" argument is optional (i.e., may be null). If it is null
351  the returned array is itself dynamically allocated and should also
352  be freed when it is no longer needed. Otherwise, the chunks array
353  must be of at least n_elements in length. It is filled in with the
354  pointers to the chunks.
355 
356  In either case, independent_comalloc returns this pointer array, or
357  null if the allocation failed. If n_elements is zero and chunks is
358  null, it returns a chunk representing an array with zero elements
359  (which should be freed if not wanted).
360 
361  Each element must be individually freed when it is no longer
362  needed. If you'd like to instead be able to free all at once, you
363  should instead use a single regular malloc, and assign pointers at
364  particular offsets in the aggregate space. (In this case though, you
365  cannot independently free elements.)
366 
 367  independent_comalloc differs from independent_calloc in that each
368  element may have a different size, and also that it does not
369  automatically clear elements.
370 
371  independent_comalloc can be used to speed up allocation in cases
372  where several structs or objects must always be allocated at the
373  same time. For example:
374 
375  struct Head { ... }
376  struct Foot { ... }
377 
378  void send_message(char* msg) {
379  int msglen = strlen(msg);
380  size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
381  void* chunks[3];
382  if (independent_comalloc(3, sizes, chunks) == 0)
383  die();
384  struct Head* head = (struct Head*)(chunks[0]);
385  char* body = (char*)(chunks[1]);
386  struct Foot* foot = (struct Foot*)(chunks[2]);
387  // ...
388  }
389 
390  In general though, independent_comalloc is worth using only for
391  larger values of n_elements. For small values, you probably won't
392  detect enough difference from series of malloc calls to bother.
393 
394  Overuse of independent_comalloc can increase overall memory usage,
395  since it cannot reuse existing noncontiguous small chunks that
396  might be available for some of the elements.
397 */
398 void** dlindependent_comalloc(size_t, size_t*, void**);
399 
400 
401 /*
402  pvalloc(size_t n);
403  Equivalent to valloc(minimum-page-that-holds(n)), that is,
404  round up n to nearest pagesize.
405  */
406 void* dlpvalloc(size_t);
407 
408 /*
409  malloc_trim(size_t pad);
410 
411  If possible, gives memory back to the system (via negative arguments
412  to sbrk) if there is unused memory at the `high' end of the malloc
413  pool or in unused MMAP segments. You can call this after freeing
414  large blocks of memory to potentially reduce the system-level memory
415  requirements of a program. However, it cannot guarantee to reduce
416  memory. Under some allocation patterns, some large free blocks of
417  memory will be locked between two used chunks, so they cannot be
418  given back to the system.
419 
420  The `pad' argument to malloc_trim represents the amount of free
421  trailing space to leave untrimmed. If this argument is zero, only
422  the minimum amount of memory to maintain internal data structures
423  will be left. Non-zero arguments can be supplied to maintain enough
424  trailing space to service future expected allocations without having
425  to re-obtain memory from the system.
426 
427  Malloc_trim returns 1 if it actually released any memory, else 0.
428 */
429 int dlmalloc_trim(size_t);
430 
431 /*
432  malloc_stats();
433  Prints on stderr the amount of space obtained from the system (both
434  via sbrk and mmap), the maximum amount (which may be more than
435  current if malloc_trim and/or munmap got called), and the current
436  number of bytes allocated via malloc (or realloc, etc) but not yet
437  freed. Note that this is the number of bytes allocated, not the
438  number requested. It will be larger than the number requested
439  because of alignment and bookkeeping overhead. Because it includes
440  alignment wastage as being in use, this figure may be greater than
441  zero even when no user-level chunks are allocated.
442 
443  The reported current and maximum system memory can be inaccurate if
444  a program makes other calls to system memory allocation functions
445  (normally sbrk) outside of malloc.
446 
447  malloc_stats prints only the most commonly interesting statistics.
448  More information can be obtained by calling mallinfo.
449 */
450 void dlmalloc_stats();
451 
452 #endif /* !ONLY_MSPACES */
453 
454 /*
455  malloc_usable_size(void* p);
456 
457  Returns the number of bytes you can actually use in
458  an allocated chunk, which may be more than you requested (although
459  often not) due to alignment and minimum size constraints.
460  You can use this many bytes without worrying about
461  overwriting other allocated objects. This is not a particularly great
462  programming practice. malloc_usable_size can be more useful in
463  debugging and assertions, for example:
464 
465  p = malloc(n);
466  assert(malloc_usable_size(p) >= 256);
467 */
468 size_t dlmalloc_usable_size(void*);
469 
470 
471 #if MSPACES
472 
473 /*
474  mspace is an opaque type representing an independent
475  region of space that supports mspace_malloc, etc.
476 */
477 typedef void* mspace;
478 
479 /*
480  create_mspace creates and returns a new independent space with the
481  given initial capacity, or, if 0, the default granularity size. It
482  returns null if there is no system memory available to create the
483  space. If argument locked is non-zero, the space uses a separate
484  lock to control access. The capacity of the space will grow
485  dynamically as needed to service mspace_malloc requests. You can
486  control the sizes of incremental increases of this space by
487  compiling with a different DEFAULT_GRANULARITY or dynamically
488  setting with mallopt(M_GRANULARITY, value).
489 */
490 mspace create_mspace(size_t capacity, int locked);
491 
492 /*
493  destroy_mspace destroys the given space, and attempts to return all
494  of its memory back to the system, returning the total number of
495  bytes freed. After destruction, the results of access to all memory
496  used by the space become undefined.
497 */
498 size_t destroy_mspace(mspace msp);
499 
500 /*
501  create_mspace_with_base uses the memory supplied as the initial base
502  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
503  space is used for bookkeeping, so the capacity must be at least this
504  large. (Otherwise 0 is returned.) When this initial space is
505  exhausted, additional memory will be obtained from the system.
506  Destroying this space will deallocate all additionally allocated
507  space (if possible) but not the initial base.
508 */
509 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
510 
511 /*
512  mspace_track_large_chunks controls whether requests for large chunks
513  are allocated in their own untracked mmapped regions, separate from
514  others in this mspace. By default large chunks are not tracked,
515  which reduces fragmentation. However, such chunks are not
516  necessarily released to the system upon destroy_mspace. Enabling
517  tracking by setting to true may increase fragmentation, but avoids
518  leakage when relying on destroy_mspace to release all memory
519  allocated using this space. The function returns the previous
520  setting.
521 */
522 int mspace_track_large_chunks(mspace msp, int enable);
523 
524 /*
525  mspace_malloc behaves as malloc, but operates within
526  the given space.
527 */
528 void* mspace_malloc(mspace msp, size_t bytes);
529 
530 /*
531  mspace_free behaves as free, but operates within
532  the given space.
533 
534  If compiled with FOOTERS==1, mspace_free is not actually needed.
535  free may be called instead of mspace_free because freed chunks from
536  any space are handled by their originating spaces.
537 */
538 void mspace_free(mspace msp, void* mem);
539 
540 /*
541  mspace_realloc behaves as realloc, but operates within
542  the given space.
543 
544  If compiled with FOOTERS==1, mspace_realloc is not actually
545  needed. realloc may be called instead of mspace_realloc because
546  realloced chunks from any space are handled by their originating
547  spaces.
548 */
549 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
550 
551 /*
552  mspace_calloc behaves as calloc, but operates within
553  the given space.
554 */
555 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
556 
557 /*
558  mspace_memalign behaves as memalign, but operates within
559  the given space.
560 */
561 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
562 
563 /*
564  mspace_independent_calloc behaves as independent_calloc, but
565  operates within the given space.
566 */
567 void** mspace_independent_calloc(mspace msp, size_t n_elements,
568  size_t elem_size, void* chunks[]);
569 
570 /*
571  mspace_independent_comalloc behaves as independent_comalloc, but
572  operates within the given space.
573 */
574 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
575  size_t sizes[], void* chunks[]);
576 
577 /*
578  mspace_footprint() returns the number of bytes obtained from the
579  system for this space.
580 */
581 size_t mspace_footprint(mspace msp);
582 
583 
584 #if !NO_MALLINFO
585 /*
586  mspace_mallinfo behaves as mallinfo, but reports properties of
587  the given space.
588 */
589 struct mallinfo mspace_mallinfo(mspace msp);
590 #endif /* NO_MALLINFO */
591 
592 /*
593  malloc_usable_size(void* p) behaves the same as malloc_usable_size;
594 */
595  size_t mspace_usable_size(void* mem);
596 
597 /*
598  mspace_malloc_stats behaves as malloc_stats, but reports
599  properties of the given space.
600 */
601 void mspace_malloc_stats(mspace msp);
602 
603 /*
604  mspace_trim behaves as malloc_trim, but
605  operates within the given space.
606 */
607 int mspace_trim(mspace msp, size_t pad);
608 
609 /*
610  An alias for mallopt.
611 */
612 int mspace_mallopt(int, int);
613 
614 #endif /* MSPACES */
615 
616 #ifdef __cplusplus
617 } /* end of extern "C" */
618 #endif
619 
620 #endif /* MALLOC_280_H */
621 
622 #endif
623 /* AIPS_LINUX */
624 
625 #endif
626 /* AIPS_MALLOC */
627 
628 #endif
629 /* AIPS_NO_LEA_MALLOC */
size_t mspace_usable_size(void *mem)
mspace create_mspace_with_base(void *base, size_t capacity, int locked)
int mspace_trim(mspace msp, size_t pad)
MALLINFO_FIELD_TYPE arena
Definition: malloc.h:130
int mspace_mallopt(int, int)
MALLINFO_FIELD_TYPE hblks
Definition: malloc.h:133
#define dlmallinfo
Definition: malloc.h:112
#define dlmalloc_trim
Definition: malloc.h:114
#define dlmalloc_stats
Definition: malloc.h:115
#define dlmallopt
Definition: malloc.h:113
size_t mspace_footprint(mspace msp)
#define dlmalloc_footprint
Definition: malloc.h:117
mspace create_mspace(size_t capacity, int locked)
MALLINFO_FIELD_TYPE ordblks
Definition: malloc.h:131
int mspace_track_large_chunks(mspace msp, int enable)
#define dlmalloc_usable_size
Definition: malloc.h:116
void * mspace_malloc(mspace msp, size_t bytes)
#define MALLINFO_FIELD_TYPE
Definition: malloc.h:125
#define dlmalloc
Definition: malloc.h:107
void ** mspace_independent_comalloc(mspace msp, size_t n_elements, size_t sizes[], void *chunks[])
void * mspace
Definition: malloc.h:477
MALLINFO_FIELD_TYPE fordblks
Definition: malloc.h:138
struct mallinfo mspace_mallinfo(mspace msp)
#define dlpvalloc
Definition: malloc.h:111
#define dlvalloc
Definition: malloc.h:110
MALLINFO_FIELD_TYPE fsmblks
Definition: malloc.h:136
#define dlrealloc
Definition: malloc.h:109
#define dlindependent_calloc
Definition: malloc.h:118
MALLINFO_FIELD_TYPE keepcost
Definition: malloc.h:139
MALLINFO_FIELD_TYPE hblkhd
Definition: malloc.h:134
void mspace_free(mspace msp, void *mem)
#define dlmemalign
Definition: malloc.h:108
void * mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
#define dlcalloc
Definition: malloc.h:105
size_t destroy_mspace(mspace msp)
void ** mspace_independent_calloc(mspace msp, size_t n_elements, size_t elem_size, void *chunks[])
void mspace_malloc_stats(mspace msp)
#define dlfree
Definition: malloc.h:106
#define dlindependent_comalloc
Definition: malloc.h:119
void * mspace_realloc(mspace msp, void *mem, size_t newsize)
MALLINFO_FIELD_TYPE uordblks
Definition: malloc.h:137
MALLINFO_FIELD_TYPE smblks
Definition: malloc.h:132
MALLINFO_FIELD_TYPE usmblks
Definition: malloc.h:135
void * mspace_memalign(mspace msp, size_t alignment, size_t bytes)