/*
 * Copyright (c) 2015-16 David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <zebra.h>

#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#ifdef HAVE_MALLOC_NP_H
#include <malloc_np.h>
#endif
#ifdef HAVE_MALLOC_MALLOC_H
#include <malloc/malloc.h>
#endif

#include "memory.h"
#include "log.h"
#include "libfrr_trace.h"

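/*
 * Global list of memory groups.  mg_first is the list head; mg_insert
 * points at the tail link where the next registered group (from
 * DEFINE_MGROUP()) is appended, preserving registration order.
 */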
static struct memgroup *mg_first = NULL;
struct memgroup **mg_insert = &mg_first;

DEFINE_MGROUP(LIB, "libfrr");
DEFINE_MTYPE(LIB, TMP, "Temporary memory");
DEFINE_MTYPE(LIB, BITFIELD, "Bitfield memory");

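/*
 * Account for one new allocation of `size` bytes at `ptr`: bump the
 * live-allocation count, track its high-water mark, record the object
 * size (downgrading to SIZE_VAR once differing sizes are seen) and,
 * when malloc_usable_size() is available, keep running byte totals.
 * All updates use relaxed atomics; these are statistics, not
 * synchronization.
 */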
static inline void mt_count_alloc(struct memtype *mt, size_t size, void *ptr)
{
	size_t current;
	size_t oldsize;

	current = 1 + atomic_fetch_add_explicit(&mt->n_alloc, 1,
						memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->n_max, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->n_max, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->size, memory_order_relaxed);
	if (oldsize == 0)
		oldsize = atomic_exchange_explicit(&mt->size, size,
						   memory_order_relaxed);
	if (oldsize != 0 && oldsize != size && oldsize != SIZE_VAR)
		atomic_store_explicit(&mt->size, SIZE_VAR,
				      memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	current = mallocsz + atomic_fetch_add_explicit(&mt->total, mallocsz,
						       memory_order_relaxed);
	oldsize = atomic_load_explicit(&mt->max_size, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->max_size, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);
#endif
}

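/* Undo the accounting done by mt_count_alloc() when an object is freed. */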
static inline void mt_count_free(struct memtype *mt, void *ptr)
{
	frrtrace(2, frr_libfrr, memfree, mt, ptr);

	assert(mt->n_alloc);
	atomic_fetch_sub_explicit(&mt->n_alloc, 1, memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	atomic_fetch_sub_explicit(&mt->total, mallocsz, memory_order_relaxed);
#endif
}

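/*
 * Common post-allocation check: if an allocation of nonzero size failed,
 * memory_oom() reports the out-of-memory condition; otherwise the
 * allocation is counted and the pointer is returned unchanged.
 */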
static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
{
	frrtrace(3, frr_libfrr, memalloc, mt, ptr, size);

	if (__builtin_expect(ptr == NULL, 0)) {
		if (size) {
			/* malloc(0) is allowed to return NULL */
			memory_oom(size, mt->name);
		}
		return NULL;
	}
	mt_count_alloc(mt, size, ptr);
	return ptr;
}

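/*
 * Counted wrappers around the C allocator.  Callers normally do not use
 * these directly; a minimal usage sketch, assuming the XMALLOC()/XFREE()
 * convenience macros declared in memory.h:
 *
 *	char *buf = XMALLOC(MTYPE_TMP, 64);
 *	...
 *	XFREE(MTYPE_TMP, buf);
 *
 * where MTYPE_TMP is the "Temporary memory" type defined above and the
 * macros pass the memtype through to the q*() functions below.
 */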
void *qmalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, malloc(size), size);
}

void *qcalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, calloc(size, 1), size);
}

void *qrealloc(struct memtype *mt, void *ptr, size_t size)
{
	if (ptr)
		mt_count_free(mt, ptr);
	return mt_checkalloc(mt, ptr ? realloc(ptr, size) : malloc(size), size);
}

void *qstrdup(struct memtype *mt, const char *str)
{
	return str ? mt_checkalloc(mt, strdup(str), strlen(str) + 1) : NULL;
}

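/*
 * Drop the counters for `ptr` without freeing it, for memory whose
 * ownership passes to code that releases it outside of qfree().
 */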
void qcountfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
}

void qfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
	free(ptr);
}

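/*
 * Walk all memory groups and all memtypes within them.  `func` is called
 * once per group with mt == NULL, then once per memtype in that group;
 * a nonzero return value aborts the walk and is passed back to the
 * caller.
 *
 * A minimal sketch of a callback (hypothetical helper, not part of this
 * file) that totals live allocations across all memtypes:
 *
 *	static int count_cb(void *arg, struct memgroup *mg,
 *			    struct memtype *mt)
 *	{
 *		size_t *total = arg;
 *
 *		if (mt)
 *			*total += mt->n_alloc;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	qmem_walk(count_cb, &total);
 */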
int qmem_walk(qmem_walk_fn *func, void *arg)
{
	struct memgroup *mg;
	struct memtype *mt;
	int rv;

	for (mg = mg_first; mg; mg = mg->next) {
		if ((rv = func(arg, mg, NULL)))
			return rv;
		for (mt = mg->types; mt; mt = mt->next)
			if ((rv = func(arg, mg, mt)))
				return rv;
	}
	return 0;
}

struct exit_dump_args {
	FILE *fp;
	const char *prefix;
	int error;
	struct memgroup *mg_printed;
};

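/*
 * qmem_walk callback used for the exit-time dump: report every memtype
 * that still has live allocations, counting those in groups not marked
 * active_at_exit as errors.  Output goes to the given FILE *, or to
 * zlog when that FILE * is stderr.
 */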
static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt)
{
	struct exit_dump_args *eda = arg;
	char size[32];

	if (!mt || !mt->n_alloc)
		return 0;
	if (!mg->active_at_exit)
		eda->error++;

	if (eda->mg_printed != mg) {
		if (eda->fp != stderr)
			fprintf(eda->fp, "%s: showing active allocations in memory group %s\n",
				eda->prefix, mg->name);
		else if (mg->active_at_exit)
			zlog_debug("%s: showing active allocations in memory group %s",
				   eda->prefix, mg->name);
		else
			zlog_warn("%s: showing active allocations in memory group %s",
				  eda->prefix, mg->name);
		eda->mg_printed = mg;
	}

	snprintf(size, sizeof(size), "%10zu", mt->size);
	if (eda->fp != stderr)
		fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s\n",
			eda->prefix, mt->name, mt->n_alloc,
			mt->size == SIZE_VAR ? "(variably sized)" : size);
	else if (mg->active_at_exit)
		zlog_debug("%s: memstats: %-30s: %6zu * %s",
			   eda->prefix, mt->name, mt->n_alloc,
			   mt->size == SIZE_VAR ? "(variably sized)" : size);
	else
		zlog_warn("%s: memstats: %-30s: %6zu * %s",
			  eda->prefix, mt->name, mt->n_alloc,
			  mt->size == SIZE_VAR ? "(variably sized)" : size);
	return 0;
}

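/*
 * Dump the current allocation counters, typically from a daemon's
 * shutdown path.  Returns the number of memtypes with unexpected live
 * allocations, i.e. nonzero if something looks like a leak.
 */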
int log_memstats(FILE *fp, const char *prefix)
{
	struct exit_dump_args eda = {.fp = fp, .prefix = prefix, .error = 0};
	qmem_walk(qmem_exit_walker, &eda);
	return eda.error;
}