/*
 * Copyright (c) 2015-16 David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <zebra.h>

#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#ifdef HAVE_MALLOC_NP_H
#include <malloc_np.h>
#endif
#ifdef HAVE_MALLOC_MALLOC_H
#include <malloc/malloc.h>
#endif

#include "memory.h"
#include "log.h"
#include "libfrr_trace.h"

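/*
 * Global list of memory groups.  mg_insert points at the tail link so
 * that newly registered groups (DEFINE_MGROUP constructors) can be
 * appended to the list.
 */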
static struct memgroup *mg_first = NULL;
struct memgroup **mg_insert = &mg_first;

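/*
 * The "libfrr" group and its generic MTYPEs are defined here; daemons
 * normally declare their own groups and types and allocate through the
 * X*() wrappers from memory.h.  A minimal sketch of the usual pattern
 * (illustrative only; MTYPE_FOO is a made-up example name):
 *
 *   DEFINE_MTYPE_STATIC(LIB, FOO, "Foo structure");
 *
 *   struct foo *f = XCALLOC(MTYPE_FOO, sizeof(*f));
 *   ...
 *   XFREE(MTYPE_FOO, f);
 */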
DEFINE_MGROUP(LIB, "libfrr");
DEFINE_MTYPE(LIB, TMP, "Temporary memory");
DEFINE_MTYPE(LIB, BITFIELD, "Bitfield memory");

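/*
 * Accounting on allocation: bump the live-object counter, track its
 * high-water mark, remember the object size (SIZE_VAR once differently
 * sized allocations are seen) and, when malloc_usable_size() is
 * available, the total and peak byte counts.  All updates use relaxed
 * atomics; small inaccuracies under concurrent updates are acceptable.
 */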
static inline void mt_count_alloc(struct memtype *mt, size_t size, void *ptr)
{
	size_t current;
	size_t oldsize;

	current = 1 + atomic_fetch_add_explicit(&mt->n_alloc, 1,
						memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->n_max, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->n_max, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->size, memory_order_relaxed);
	if (oldsize == 0)
		oldsize = atomic_exchange_explicit(&mt->size, size,
						   memory_order_relaxed);
	if (oldsize != 0 && oldsize != size && oldsize != SIZE_VAR)
		atomic_store_explicit(&mt->size, SIZE_VAR,
				      memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	current = mallocsz + atomic_fetch_add_explicit(&mt->total, mallocsz,
						       memory_order_relaxed);
	oldsize = atomic_load_explicit(&mt->max_size, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->max_size, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);
#endif
}

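/* Undo the accounting from mt_count_alloc() when an object is released. */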
static inline void mt_count_free(struct memtype *mt, void *ptr)
{
	frrtrace(2, frr_libfrr, memfree, mt, ptr);

	assert(mt->n_alloc);
	atomic_fetch_sub_explicit(&mt->n_alloc, 1, memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	atomic_fetch_sub_explicit(&mt->total, mallocsz, memory_order_relaxed);
#endif
}

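/*
 * Common result check for the allocating wrappers below: a NULL return
 * for a nonzero size aborts via memory_oom(); otherwise the allocation
 * is counted and the pointer handed back unchanged.
 */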
static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
{
	frrtrace(3, frr_libfrr, memalloc, mt, ptr, size);

	if (__builtin_expect(ptr == NULL, 0)) {
		if (size) {
			/* malloc(0) is allowed to return NULL */
			memory_oom(size, mt->name);
		}
		return NULL;
	}
	mt_count_alloc(mt, size, ptr);
	return ptr;
}

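/*
 * Counted wrappers around the libc allocator.  Callers normally reach
 * these through the XMALLOC()/XCALLOC()/XREALLOC()/XSTRDUP()/XFREE()
 * macros in memory.h rather than calling them directly.
 */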
void *qmalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, malloc(size), size);
}

void *qcalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, calloc(size, 1), size);
}

void *qrealloc(struct memtype *mt, void *ptr, size_t size)
{
	if (ptr)
		mt_count_free(mt, ptr);
	return mt_checkalloc(mt, ptr ? realloc(ptr, size) : malloc(size), size);
}

void *qstrdup(struct memtype *mt, const char *str)
{
	return str ? mt_checkalloc(mt, strdup(str), strlen(str) + 1) : NULL;
}

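/* Only drops the counters for ptr; the memory itself is freed elsewhere. */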
void qcountfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
}

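/* Counted counterpart to free(); passing NULL is fine, as with free(). */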
void qfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
	free(ptr);
}

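/*
 * Iterate over all memory groups and types: func is called once per
 * group with mt == NULL, then once per memtype in that group.  A nonzero
 * return value stops the walk and is passed through to the caller.
 */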
int qmem_walk(qmem_walk_fn *func, void *arg)
{
	struct memgroup *mg;
	struct memtype *mt;
	int rv;

	for (mg = mg_first; mg; mg = mg->next) {
		if ((rv = func(arg, mg, NULL)))
			return rv;
		for (mt = mg->types; mt; mt = mt->next)
			if ((rv = func(arg, mg, mt)))
				return rv;
	}
	return 0;
}

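/* State carried through the exit-time memstats walk below. */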
struct exit_dump_args {
	FILE *fp;
	const char *prefix;
	int error;
	struct memgroup *mg_printed;
};

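/*
 * qmem_walk() callback used by log_memstats(): reports every memtype
 * that still has live allocations, writing to fp unless fp is stderr,
 * in which case output goes to the log (debug level for groups flagged
 * active_at_exit, warning otherwise).  Leftover allocations in groups
 * not flagged active_at_exit are counted as errors.
 */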
static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt)
{
	struct exit_dump_args *eda = arg;
	char size[32];

	if (!mt || !mt->n_alloc)
		return 0;
	if (!mg->active_at_exit)
		eda->error++;

	if (eda->mg_printed != mg) {
		if (eda->fp != stderr)
			fprintf(eda->fp,
				"%s: showing active allocations in memory group %s\n",
				eda->prefix, mg->name);
		else if (mg->active_at_exit)
			zlog_debug("%s: showing active allocations in memory group %s",
				   eda->prefix, mg->name);
		else
			zlog_warn("%s: showing active allocations in memory group %s",
				  eda->prefix, mg->name);
		eda->mg_printed = mg;
	}

	snprintf(size, sizeof(size), "%10zu", mt->size);
	if (eda->fp != stderr)
		fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s\n",
			eda->prefix, mt->name, mt->n_alloc,
			mt->size == SIZE_VAR ? "(variably sized)" : size);
	else if (mg->active_at_exit)
		zlog_debug("%s: memstats: %-30s: %6zu * %s",
			   eda->prefix, mt->name, mt->n_alloc,
			   mt->size == SIZE_VAR ? "(variably sized)" : size);
	else
		zlog_warn("%s: memstats: %-30s: %6zu * %s",
			  eda->prefix, mt->name, mt->n_alloc,
			  mt->size == SIZE_VAR ? "(variably sized)" : size);
	return 0;
}

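/*
 * Dump statistics for all memtypes with outstanding allocations.  The
 * return value is the number of types that still hold memory in groups
 * not marked active_at_exit, i.e. nonzero indicates a probable leak.
 */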
int log_memstats(FILE *fp, const char *prefix)
{
	struct exit_dump_args eda = {.fp = fp, .prefix = prefix, .error = 0};
	qmem_walk(qmem_exit_walker, &eda);
	return eda.error;
}