Line data Source code
1 : /*
2 : * Copyright (c) 2015-16 David Lamparter, for NetDEF, Inc.
3 : *
4 : * Permission to use, copy, modify, and distribute this software for any
5 : * purpose with or without fee is hereby granted, provided that the above
6 : * copyright notice and this permission notice appear in all copies.
7 : *
8 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 : */
16 :
17 : #include <zebra.h>
18 :
19 : #include <stdlib.h>
20 : #ifdef HAVE_MALLOC_H
21 : #include <malloc.h>
22 : #endif
23 : #ifdef HAVE_MALLOC_NP_H
24 : #include <malloc_np.h>
25 : #endif
26 : #ifdef HAVE_MALLOC_MALLOC_H
27 : #include <malloc/malloc.h>
28 : #endif
29 :
30 : #include "memory.h"
31 : #include "log.h"
32 : #include "libfrr_trace.h"
33 :
/* Head of the singly-linked list of all registered memory groups. */
static struct memgroup *mg_first = NULL;
/* Tail insertion point for the group list; non-static because the
 * DEFINE_MGROUP() registration machinery in memory.h appends through it
 * (presumably from constructors) — keeps groups in registration order. */
struct memgroup **mg_insert = &mg_first;

/* libfrr's own memory group and the generic types it provides. */
DEFINE_MGROUP(LIB, "libfrr");
DEFINE_MTYPE(LIB, TMP, "Temporary memory");
DEFINE_MTYPE(LIB, BITFIELD, "Bitfield memory");
40 :
/*
 * Book-keeping after a successful allocation: bump the live-allocation
 * count for this memtype and maintain the high-water marks and the
 * per-object size record.  All updates use relaxed atomics — the
 * statistics are approximate by design and only need to be good enough
 * for "show memory" style reporting.
 */
static inline void mt_count_alloc(struct memtype *mt, size_t size, void *ptr)
{
	size_t current;
	size_t oldsize;

	/* current = number of live allocations including this one */
	current = 1 + atomic_fetch_add_explicit(&mt->n_alloc, 1,
						memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->n_max, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->n_max, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);

	/* Remember the object size on first allocation; if a later
	 * allocation has a different size, collapse the record to the
	 * SIZE_VAR marker ("variably sized"). */
	oldsize = atomic_load_explicit(&mt->size, memory_order_relaxed);
	if (oldsize == 0)
		oldsize = atomic_exchange_explicit(&mt->size, size,
						   memory_order_relaxed);
	if (oldsize != 0 && oldsize != size && oldsize != SIZE_VAR)
		atomic_store_explicit(&mt->size, SIZE_VAR,
				      memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	/* Track total heap consumption (and its high-water mark) using
	 * the usable size the allocator actually granted, which may be
	 * larger than the requested size. */
	size_t mallocsz = malloc_usable_size(ptr);

	current = mallocsz + atomic_fetch_add_explicit(&mt->total, mallocsz,
						       memory_order_relaxed);
	oldsize = atomic_load_explicit(&mt->max_size, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->max_size, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);
#endif
}
79 :
80 19285672 : static inline void mt_count_free(struct memtype *mt, void *ptr)
81 : {
82 19285672 : frrtrace(2, frr_libfrr, memfree, mt, ptr);
83 :
84 19285669 : assert(mt->n_alloc);
85 19285669 : atomic_fetch_sub_explicit(&mt->n_alloc, 1, memory_order_relaxed);
86 :
87 : #ifdef HAVE_MALLOC_USABLE_SIZE
88 19285669 : size_t mallocsz = malloc_usable_size(ptr);
89 :
90 19285671 : atomic_fetch_sub_explicit(&mt->total, mallocsz, memory_order_relaxed);
91 : #endif
92 19285671 : }
93 :
94 20384919 : static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
95 : {
96 20384919 : frrtrace(3, frr_libfrr, memalloc, mt, ptr, size);
97 :
98 20384923 : if (__builtin_expect(ptr == NULL, 0)) {
99 0 : if (size) {
100 : /* malloc(0) is allowed to return NULL */
101 0 : memory_oom(size, mt->name);
102 : }
103 : return NULL;
104 : }
105 20384923 : mt_count_alloc(mt, size, ptr);
106 20384923 : return ptr;
107 : }
108 :
/* Counted malloc(): allocate size bytes charged to memtype mt.
 * Aborts via memory_oom() on failure; may return NULL only for size 0. */
void *qmalloc(struct memtype *mt, size_t size)
{
	void *ptr = malloc(size);

	return mt_checkalloc(mt, ptr, size);
}
113 :
/* Counted calloc(): allocate size zeroed bytes charged to memtype mt.
 * Aborts via memory_oom() on failure; may return NULL only for size 0. */
void *qcalloc(struct memtype *mt, size_t size)
{
	void *ptr = calloc(size, 1);

	return mt_checkalloc(mt, ptr, size);
}
118 :
/*
 * Counted realloc(): resize ptr (or allocate fresh if ptr is NULL),
 * charged to memtype mt.  The old block is un-counted before the
 * resize and the new one re-counted afterwards, so the stats reflect
 * the new usable size.  Aborts via memory_oom() on failure.
 */
void *qrealloc(struct memtype *mt, void *ptr, size_t size)
{
	void *newptr;

	if (ptr) {
		mt_count_free(mt, ptr);
		newptr = realloc(ptr, size);
	} else {
		newptr = malloc(size);
	}

	return mt_checkalloc(mt, newptr, size);
}
125 :
/* Counted strdup(): duplicate str charged to memtype mt.
 * NULL input yields NULL; allocation failure aborts via memory_oom(). */
void *qstrdup(struct memtype *mt, const char *str)
{
	if (!str)
		return NULL;

	return mt_checkalloc(mt, strdup(str), strlen(str) + 1);
}
130 :
/* Un-count ptr from memtype mt WITHOUT freeing it — for memory whose
 * ownership passed to something outside the counted allocator. */
void qcountfree(struct memtype *mt, void *ptr)
{
	if (!ptr)
		return;

	mt_count_free(mt, ptr);
}
136 :
/* Counted free(): release ptr and un-count it from memtype mt.
 * NULL is accepted and ignored, matching free(NULL) semantics. */
void qfree(struct memtype *mt, void *ptr)
{
	if (!ptr)
		return;

	mt_count_free(mt, ptr);
	free(ptr);
}
143 :
144 331 : int qmem_walk(qmem_walk_fn *func, void *arg)
145 : {
146 331 : struct memgroup *mg;
147 331 : struct memtype *mt;
148 331 : int rv;
149 :
150 1852 : for (mg = mg_first; mg; mg = mg->next) {
151 1521 : if ((rv = func(arg, mg, NULL)))
152 0 : return rv;
153 67847 : for (mt = mg->types; mt; mt = mt->next)
154 66326 : if ((rv = func(arg, mg, mt)))
155 0 : return rv;
156 : }
157 : return 0;
158 : }
159 :
/* State threaded through qmem_exit_walker() by log_memstats(). */
struct exit_dump_args {
	FILE *fp;		/* output stream; stderr means "route via zlog" */
	const char *prefix;	/* string prepended to every output line */
	int error;		/* number of still-allocated types in groups
				 * not flagged active_at_exit (i.e. leaks) */
	struct memgroup *mg_printed;	/* last group whose header was emitted,
					 * so each header prints only once */
};
166 :
167 67847 : static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt)
168 : {
169 67847 : struct exit_dump_args *eda = arg;
170 67847 : char size[32];
171 :
172 67847 : if (!mt || !mt->n_alloc)
173 64190 : return 0;
174 3657 : if (!mg->active_at_exit)
175 2664 : eda->error++;
176 :
177 3657 : if (eda->mg_printed != mg) {
178 819 : if (eda->fp != stderr)
179 385 : fprintf(eda->fp, "%s: showing active allocations in memory group %s",
180 : eda->prefix, mg->name);
181 434 : else if (mg->active_at_exit)
182 190 : zlog_debug("%s: showing active allocations in memory group %s",
183 : eda->prefix, mg->name);
184 : else
185 244 : zlog_warn("%s: showing active allocations in memory group %s",
186 : eda->prefix, mg->name);
187 819 : eda->mg_printed = mg;
188 : }
189 :
190 3657 : snprintf(size, sizeof(size), "%10zu", mt->size);
191 3657 : if (eda->fp != stderr)
192 3510 : fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s",
193 1755 : eda->prefix, mt->name, mt->n_alloc,
194 1755 : mt->size == SIZE_VAR ? "(variably sized)" : size);
195 1902 : else if (mg->active_at_exit)
196 570 : zlog_debug("%s: memstats: %-30s: %6zu * %s",
197 : eda->prefix, mt->name, mt->n_alloc,
198 : mt->size == SIZE_VAR ? "(variably sized)" : size);
199 : else
200 1818 : zlog_warn("%s: memstats: %-30s: %6zu * %s",
201 : eda->prefix, mt->name, mt->n_alloc,
202 : mt->size == SIZE_VAR ? "(variably sized)" : size);
203 : return 0;
204 : }
205 :
206 331 : int log_memstats(FILE *fp, const char *prefix)
207 : {
208 331 : struct exit_dump_args eda = {.fp = fp, .prefix = prefix, .error = 0};
209 331 : qmem_walk(qmem_exit_walker, &eda);
210 331 : return eda.error;
211 : }
|