Line data Source code
1 : /*
2 : * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
3 : *
4 : * Permission to use, copy, modify, and distribute this software for any
5 : * purpose with or without fee is hereby granted, provided that the above
6 : * copyright notice and this permission notice appear in all copies.
7 : *
8 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 : */
16 :
17 : #ifndef _FRR_TYPESAFE_H
18 : #define _FRR_TYPESAFE_H
19 :
20 : #include <stddef.h>
21 : #include <stdint.h>
22 : #include <stdbool.h>
23 : #include "compiler.h"
24 :
25 : #ifdef __cplusplus
26 : extern "C" {
27 : #endif
28 :
29 : /* generic macros for all list-like types */
30 :
/* iterate over all items; the loop variable "item" is NULL once the loop
 * terminates.  Items must NOT be deleted from inside the loop body.
 */
#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
/* deletion-safe variant: the successor is looked up before the loop body
 * runs (stashed in a hidden "prefix##_safe" local), so the current item
 * may be prefix##_del()'d or freed inside the body.
 */
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					   (item = prefix##_first(head))); \
			item; \
			item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
/* deletion-safe iteration starting at (and consuming) "from"; "from"
 * always holds the lookahead item, so iteration can be suspended and
 * resumed with the same variable.
 */
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
			item; \
			item = from, from = prefix##_next_safe(head, from))
45 :
46 : /* reverse direction, only supported by a few containers */
47 :
/* reverse iteration, last to first; see frr_each for semantics.  Only
 * works with containers that provide _last/_prev (e.g. DLIST).
 */
#define frr_rev_each(prefix, head, item) \
	for (item = prefix##_last(head); item; \
			item = prefix##_prev(head, item))
/* reverse + deletion-safe; predecessor is fetched before the body runs */
#define frr_rev_each_safe(prefix, head, item) \
	for (typeof(prefix##_prev_safe(head, NULL)) prefix##_safe = \
			prefix##_prev_safe(head, \
					   (item = prefix##_last(head))); \
			item; \
			item = prefix##_safe, \
			prefix##_safe = prefix##_prev_safe(head, prefix##_safe))
/* reverse + deletion-safe, resuming at "from" (which holds the lookahead) */
#define frr_rev_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_prev_safe(head, item); \
			item; \
			item = from, from = prefix##_prev_safe(head, from))
62 :
/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.  Each expands on top of the container's
 * _const_* implementation and merely casts away const.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_first(h); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_next(h, item); \
} \
/* ... */
/* non-const _last/_prev wrappers over _const_last/_const_prev */
#define TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _last(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_last(h); \
} \
macro_pure type *prefix ## _prev(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_prev(h, item); \
} \
/* ... */
/* non-const _find wrapper over _const_find */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find(h, item); \
} \
/* ... */
/* non-const _find_lt (greatest item strictly less than) and _find_gteq
 * (smallest item greater than or equal) wrappers, for sorted containers.
 */
#define TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_lt(h, item); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_gteq(h, item); \
} \
/* ... */
105 :
/* *_member via find - when there is no better membership check than find().
 * Only valid for unique containers: pointer identity of the find() result
 * decides membership.
 */
#define TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	return item == prefix ## _const_find(h, item); \
} \
/* ... */
114 :
/* *_member via find_gteq - same for non-unique containers.  Walks the run
 * of items comparing equal to "item" (starting at the first >= position)
 * and uses pointer identity to decide membership.
 */
#define TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	const type *iter; \
	for (iter = prefix ## _const_find_gteq(h, item); iter; \
			iter = prefix ## _const_next(h, iter)) { \
		if (iter == item) \
			return true; \
		/* past the run of equal-comparing items => not present */ \
		if (cmpfn(iter, item) > 0) \
			break; \
	} \
	return false; \
} \
/* ... */
131 :
/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (= everything except LIST,
 * DLIST and SKIPLIST), just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
} \
/* ... */
145 :
146 : /* single-linked list, unsorted/arbitrary.
147 : * can be used as queue with add_tail / pop
148 : */
149 :
/* don't use these structs directly */
struct slist_item {
	/* _SLIST_LAST on the last item; NULL when not on any list */
	struct slist_item *next;
};

struct slist_head {
	/* last_next points at the ->next field of the last item (or at
	 * "first" when empty), making add_tail O(1).
	 */
	struct slist_item *first, **last_next;
	size_t count;
};

/* this replaces NULL as the value for ->next on the last item. */
extern struct slist_item typesafe_slist_sentinel;
#define _SLIST_LAST &typesafe_slist_sentinel
163 :
164 66210 : static inline void typesafe_list_add(struct slist_head *head,
165 : struct slist_item **pos, struct slist_item *item)
166 : {
167 66210 : item->next = *pos;
168 66210 : *pos = item;
169 66210 : if (pos == head->last_next)
170 65771 : head->last_next = &item->next;
171 66210 : head->count++;
172 : }
173 :
/* O(n) membership scan, implemented out-of-line in typesafe.c */
extern bool typesafe_list_member(const struct slist_head *head,
				 const struct slist_item *item);
176 :
/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer, equivalent to prefix##_init().  An empty list's
 * ->first must be the sentinel (_SLIST_LAST), not NULL: all the list
 * operations compare against the sentinel to detect the end, so a
 * zero-initialized ->first would make _const_first() return a bogus
 * container_of(NULL, ...) pointer and send _del()/_pop() off the rails.
 */
#define INIT_LIST(var) { .sh = { .first = _SLIST_LAST, \
				 .last_next = &var.sh.first, }, }
191 :
/* generate all accessor functions for an unsorted single-linked list.
 * add_head/add_tail/add_after are O(1); del is O(n) (needs predecessor).
 */
#define DECLARE_LIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.first = _SLIST_LAST; \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
/* after == NULL inserts at the head */ \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
/* O(n): walk to find the predecessor pointer.  Returns NULL if the item \
 * was not on this list; otherwise returns the item with ->next cleared. \
 */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter != _SLIST_LAST && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (*iter == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	/* removed the last item => tail shortcut moves to predecessor */ \
	if (item->field.si.next == _SLIST_LAST) \
		h->sh.last_next = iter; \
	item->field.si.next = NULL; \
	return item; \
} \
/* remove and return the first item, NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (sitem == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == _SLIST_LAST) \
		h->sh.last_next = &h->sh.first; \
	sitem->next = NULL; \
	return container_of(sitem, type, field.si); \
} \
/* swap heads; last_next may point into the head itself (empty list) and \
 * must then be re-aimed at the new head after the copy \
 */ \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	if (a->sh.last_next == &b->sh.first) \
		a->sh.last_next = &a->sh.first; \
	if (b->sh.last_next == &a->sh.first) \
		b->sh.last_next = &b->sh.first; \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->sh.first != _SLIST_LAST) \
		return container_of(h->sh.first, type, field.si); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct slist_item *sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
/* like _next but tolerates item == NULL (for frr_each_safe) */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* true if the item is on *some* list (the sentinel makes ->next non-NULL \
 * even for the last item; del/pop reset it to NULL) \
 */ \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	return item->field.si.next != NULL; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_list_member(&h->sh, &item->field.si); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
296 :
/* don't use these structs directly */
struct dlist_item {
	/* closed ring: never NULL while on a list; the boundary items
	 * point back at the head's hitem instead of NULL.
	 */
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	/* hitem is part of the ring itself; points at itself when empty */
	struct dlist_item hitem;
	size_t count;
};
307 :
/* link "item" into the ring immediately after "prev" (which may be the
 * head's hitem, i.e. insert at the front) and bump the count.
 */
static inline void typesafe_dlist_add(struct dlist_head *head,
		struct dlist_item *prev, struct dlist_item *item)
{
	/* SA on clang-11 thinks this can happen, but in reality -assuming no
	 * memory corruption- it can't. DLIST uses a "closed" ring, i.e. the
	 * termination at the end of the list is not NULL but rather a pointer
	 * back to the head. (This eliminates special-casing the first or last
	 * item.)
	 *
	 * Sadly, can't use assert() here since the libfrr assert / xref code
	 * uses typesafe lists itself... that said, if an assert tripped here
	 * we'd already be way past some memory corruption, so we might as
	 * well just take the SEGV. (In the presence of corruption, we'd see
	 * random SEGVs from places that make no sense at all anyway, an
	 * assert might actually be a red herring.)
	 *
	 * ("assume()" tells the compiler to produce code as if the condition
	 * will always hold; it doesn't have any actual effect here, it'll
	 * just SEGV out on "item->next->prev = item".)
	 */
	assume(prev->next != NULL);

	item->next = prev->next;
	item->next->prev = item;
	item->prev = prev;
	prev->next = item;
	head->count++;
}
336 :
337 : static inline void typesafe_dlist_swap_all(struct dlist_head *a,
338 : struct dlist_head *b)
339 : {
340 : struct dlist_head tmp = *a;
341 :
342 : a->count = b->count;
343 : if (a->count) {
344 : a->hitem.next = b->hitem.next;
345 : a->hitem.prev = b->hitem.prev;
346 : a->hitem.next->prev = &a->hitem;
347 : a->hitem.prev->next = &a->hitem;
348 : } else {
349 : a->hitem.next = &a->hitem;
350 : a->hitem.prev = &a->hitem;
351 : }
352 :
353 : b->count = tmp.count;
354 : if (b->count) {
355 : b->hitem.next = tmp.hitem.next;
356 : b->hitem.prev = tmp.hitem.prev;
357 : b->hitem.next->prev = &b->hitem;
358 : b->hitem.prev->next = &b->hitem;
359 : } else {
360 : b->hitem.next = &b->hitem;
361 : b->hitem.prev = &b->hitem;
362 : }
363 : }
364 :
/* O(n) membership scan, implemented out-of-line in typesafe.c */
extern bool typesafe_dlist_member(const struct dlist_head *head,
				  const struct dlist_item *item);

/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer, equivalent to prefix##_init(): empty ring points
 * at itself
 */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
377 :
/* generate all accessor functions for an unsorted double-linked list.
 * All of add_*, del and pop are O(1); supports reverse iteration.
 */
#define DECLARE_DLIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
/* after == NULL inserts at the head */ \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
/* O(1) unlink; the item MUST be on this list (not checked) */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return item; \
} \
/* remove and return the first item, NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	typesafe_dlist_swap_all(&a->dh, &b->dh); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure const type *prefix ## _const_last(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.prev; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_prev(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->prev == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->prev, type, field.di); \
} \
TYPESAFE_LAST_PREV(prefix, type) \
/* NULL-tolerant variants for the frr_each_safe machinery */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure type *prefix ## _prev_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _prev(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
/* true if on *some* list; del/pop reset both pointers to NULL */ \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	return ditem->next && ditem->prev; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_dlist_member(&h->dh, &item->field.di); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
489 :
/* note: heap currently caps out at 4G items */

#define HEAP_NARY 8U
typedef uint32_t heap_index_i;

struct heap_item {
	/* position of this item in heap_head.array, maintained by the
	 * pullup/pushdown helpers; used for O(1) lookup on del
	 */
	uint32_t index;
};

struct heap_head {
	struct heap_item **array;
	uint32_t arraysz, count;
};

/* grow when the array is full */
#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
/* shrink when less than roughly half full (with ~512 slack to avoid
 * grow/shrink thrashing on small heaps)
 */
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }
516 :
/* generate accessor functions for an 8-ary array heap, sorted by cmpfn.
 * add/del/pop are O(log n); iteration (_first/_next) is in array order,
 * i.e. NOT fully sorted - only array[0] is guaranteed smallest.
 */
#define DECLARE_HEAP(prefix, type, field, cmpfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
/* adapt the user's item comparator to the raw heap_item helpers */ \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
		const struct heap_item *b) \
{ \
	return cmpfn(container_of(a, type, field.hi), \
			container_of(b, type, field.hi)); \
} \
/* duplicates are allowed; always succeeds and returns NULL */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	if (HEAP_RESIZE_TRESH_UP(h)) \
		typesafe_heap_resize(&h->hh, true); \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
			prefix ## __cmp); \
	h->hh.count++; \
	return NULL; \
} \
/* O(log n): fill the freed slot with the last array element, then \
 * restore heap order in whichever direction it violates it \
 */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct heap_item *other; \
	uint32_t index = item->field.hi.index; \
	assert(h->hh.array[index] == &item->field.hi); \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	if (cmpfn(container_of(other, type, field.hi), item) < 0) \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
	else \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return item; \
} \
/* remove and return the smallest item, NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct heap_item *hitem, *other; \
	if (h->hh.count == 0) \
		return NULL; \
	hitem = h->hh.array[0]; \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return container_of(hitem, type, field.hi); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->hh.count == 0) \
		return NULL; \
	return container_of(h->hh.array[0], type, field.hi); \
} \
/* iterates in raw array order, not sorted order */ \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index + 1; \
	if (idx >= h->hh.count) \
		return NULL; \
	return container_of(h->hh.array[idx], type, field.hi); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
/* O(1): check the item's recorded index actually holds this item */ \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index; \
	if (idx >= h->hh.count) \
		return false; \
	return h->hh.array[idx] == &item->field.hi; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
606 :
/* out-of-line heap helpers (typesafe.c): array (re)allocation, and the
 * sift-down / sift-up primitives that place "item" starting at "index"
 * while keeping heap_item.index fields consistent.
 */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
				   struct heap_item *item,
				   int (*cmpfn)(const struct heap_item *a,
						const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
				 struct heap_item *item,
				 int (*cmpfn)(const struct heap_item *a,
					      const struct heap_item *b));
616 :
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	/* plain NULL termination (no sentinel, unlike slist) */
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* zero-initialization is a valid empty sorted list */
#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
651 :
/* shared implementation for sorted single-linked lists.
 * cmpfn_uq is used for insertion and must be a total order (for NONUNIQ
 * lists it includes a pointer-identity tiebreaker); cmpfn_nuq is the
 * user's comparator, used for find_lt/find_gteq.
 * add is O(n); pop is O(1).
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* returns NULL on success, or the already-present equal item */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
/* smallest item >= "item", NULL if none */ \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
/* greatest item < "item", NULL if none */ \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* TODO: del_hint */ \
/* O(n): walk to find the predecessor pointer */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	item->field.si.next = NULL; \
	return item; \
} \
/* remove and return the smallest item, NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
744 :
/* sorted list with unique items: cmpfn()==0 means "same item", so add()
 * rejects duplicates and find() is meaningful.
 */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \
\
/* O(n) lookup of the item comparing equal to "item" */ \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
MACRO_REQUIRE_SEMICOLON() /* end */
763 :
/* sorted list allowing multiple items that compare equal.  Insertion uses
 * a wrapped comparator that breaks ties by item address, so the same item
 * cannot be added twice but distinct equal items can.
 * NOTE(review): the a<b pointer comparison between unrelated objects is
 * formally unspecified in ISO C; it is relied upon here only as an
 * arbitrary-but-stable tiebreak.
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
779 :
780 :
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	/* within a bucket, items are kept sorted by hashval */
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	/* NULL until the first add (tabshift == 0 means "no table") */
	struct thash_item **entries;
	uint32_t count;

	/* table size is 1 << (tabshift - 1); 0 = unallocated */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* bucket index = top (tabshift - 1) bits of the hash value.
 * NOTE: shift is 33 - tabshift, which is UB for tabshift == 0 - callers
 * must check tabshift first.
 */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
813 :
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* zero-initialization is a valid empty hash (table allocated lazily) */
#define INIT_HASH(var) { }
828 :
829 : #define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
830 : \
831 : macro_inline void prefix ## _init(struct prefix##_head *h) \
832 : { \
833 : memset(h, 0, sizeof(*h)); \
834 : } \
835 : macro_inline void prefix ## _fini(struct prefix##_head *h) \
836 : { \
837 : assert(h->hh.count == 0); \
838 : h->hh.minshift = 0; \
839 : typesafe_hash_shrink(&h->hh); \
840 : memset(h, 0, sizeof(*h)); \
841 : } \
842 : macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
843 : { \
844 : h->hh.count++; \
845 : if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
846 : typesafe_hash_grow(&h->hh); \
847 : \
848 : uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
849 : item->field.hi.hashval = hval; \
850 : struct thash_item **np = &h->hh.entries[hbits]; \
851 : while (*np && (*np)->hashval < hval) \
852 : np = &(*np)->next; \
853 : while (*np && (*np)->hashval == hval) { \
854 : if (cmpfn(container_of(*np, type, field.hi), item) == 0) { \
855 : h->hh.count--; \
856 : return container_of(*np, type, field.hi); \
857 : } \
858 : np = &(*np)->next; \
859 : } \
860 : item->field.hi.next = *np; \
861 : *np = &item->field.hi; \
862 : return NULL; \
863 : } \
864 : macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
865 : const type *item) \
866 : { \
867 : if (!h->hh.tabshift) \
868 : return NULL; \
869 : uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
870 : const struct thash_item *hitem = h->hh.entries[hbits]; \
871 : while (hitem && hitem->hashval < hval) \
872 : hitem = hitem->next; \
873 : while (hitem && hitem->hashval == hval) { \
874 : if (!cmpfn(container_of(hitem, type, field.hi), item)) \
875 : return container_of(hitem, type, field.hi); \
876 : hitem = hitem->next; \
877 : } \
878 : return NULL; \
879 : } \
880 : TYPESAFE_FIND(prefix, type) \
881 : macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
882 : { \
883 : if (!h->hh.tabshift) \
884 : return NULL; \
885 : uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
886 : struct thash_item **np = &h->hh.entries[hbits]; \
887 : while (*np && (*np)->hashval < hval) \
888 : np = &(*np)->next; \
889 : while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
890 : np = &(*np)->next; \
891 : if (*np != &item->field.hi) \
892 : return NULL; \
893 : *np = item->field.hi.next; \
894 : item->field.hi.next = NULL; \
895 : h->hh.count--; \
896 : if (HASH_SHRINK_THRESHOLD(h->hh)) \
897 : typesafe_hash_shrink(&h->hh); \
898 : return item; \
899 : } \
900 : macro_inline type *prefix ## _pop(struct prefix##_head *h) \
901 : { \
902 : uint32_t i; \
903 : for (i = 0; i < HASH_SIZE(h->hh); i++) \
904 : if (h->hh.entries[i]) { \
905 : struct thash_item *hitem = h->hh.entries[i]; \
906 : h->hh.entries[i] = hitem->next; \
907 : h->hh.count--; \
908 : hitem->next = NULL; \
909 : if (HASH_SHRINK_THRESHOLD(h->hh)) \
910 : typesafe_hash_shrink(&h->hh); \
911 : return container_of(hitem, type, field.hi); \
912 : } \
913 : return NULL; \
914 : } \
915 : TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
916 : macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
917 : { \
918 : uint32_t i; \
919 : for (i = 0; i < HASH_SIZE(h->hh); i++) \
920 : if (h->hh.entries[i]) \
921 : return container_of(h->hh.entries[i], type, field.hi); \
922 : return NULL; \
923 : } \
/* successor of @item in iteration order: next entry in its bucket    \
 * chain, otherwise the head of the next non-empty bucket.            \
 */                                                                    \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	/* end of this chain; resume at the following bucket */ \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
936 : TYPESAFE_FIRST_NEXT(prefix, type) \
/* like _next(), but tolerates item == NULL (returns NULL then);      \
 * used by the frr_each_safe iteration macros.                        \
 */                                                                    \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
/* number of items currently in the hash (O(1), tracked on add/del) */ \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
947 : macro_pure bool prefix ## _member(const struct prefix##_head *h, \
948 : const type *item) \
949 : { \
950 : uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
951 : const struct thash_item *hitem = h->hh.entries[hbits]; \
952 : while (hitem && hitem->hashval < hval) \
953 : hitem = hitem->next; \
954 : for (hitem = h->hh.entries[hbits]; hitem && hitem->hashval <= hval; \
955 : hitem = hitem->next) \
956 : if (hitem == &item->field.hi) \
957 : return true; \
958 : return false; \
959 : } \
960 : MACRO_REQUIRE_SEMICOLON() /* end */
961 :
962 : /* skiplist, sorted.
963 : * can be used as priority queue with add / pop
964 : */
965 :
966 : /* don't use these structs directly */
/* each item directly embeds SKIPLIST_EMBED next-pointers; the last of
 * those slots (index SKIPLIST_OVERFLOW) can instead carry a tagged
 * (low-bit-set) pointer to a separately held sskip_overflow with the
 * remaining levels up to SKIPLIST_MAXDEPTH — see the "| 1" tagging in
 * the skiplist _init/_swap_all below.
 */
#define SKIPLIST_MAXDEPTH 16
#define SKIPLIST_EMBED 4
#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)
970 :
/* per-item linkage: next[] holds the first SKIPLIST_EMBED level
 * pointers (level 0 = the full sorted list). */
struct sskip_item {
	struct sskip_item *next[SKIPLIST_EMBED];
};

/* spill storage for the level pointers that don't fit in sskip_item */
struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

/* list head.  hitem is a sentinel item whose next[SKIPLIST_OVERFLOW]
 * slot is set up (by prefix##_init) as a tagged pointer into the
 * embedded overflow[] array; count tracks the number of items. */
struct sskip_head {
	struct sskip_item hitem;
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};
984 :
/* use as:
 *
 * PREDECL_SKIPLIST_UNIQ(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * };
 * DECLARE_SKIPLIST_UNIQ(namelist, struct name, nlitem, cmpfunc);
 *
 * (or the _NONUNIQ variants to allow items that compare equal.)
 */
/* forward-declares the per-"prefix" wrapper structs around the generic
 * skiplist head/item, shared by the _UNIQ and _NONUNIQ variants */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */
997 :
/* empty static initializers; prefix##_init() must still be called at
 * runtime since the head contains a self-referential tagged pointer
 * (see _init below) that cannot be expressed here. */
#define INIT_SKIPLIST_UNIQ(var)		{ }
#define INIT_SKIPLIST_NONUNIQ(var)	{ }
1000 :
/* shared body for both skiplist variants (see DECLARE_SKIPLIST_UNIQ /
 * DECLARE_SKIPLIST_NONUNIQ below).  cmpfn_nuq is used for ordered
 * lookups (find_gteq / find_lt); cmpfn_uq is used for add / del, where
 * one exact item position must be pinned down.  For the UNIQ variant
 * both arguments are the same comparator.
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
/* set up the head.  The sentinel's last embedded next-slot becomes a \
 * tagged (low bit = 1) pointer to the in-head overflow[] array that \
 * carries the deeper skiplist levels. */ \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)h->sh.overflow | 1); \
} \
/* NB: does not free or unlink any items */ \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* insert @item at its sorted position.  Return value is converted \
 * from typesafe_skiplist_add() (NULL or an item; exact semantics live \
 * in the out-of-line implementation, not visible in this header). */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *si; \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
	return container_of_null(si, type, field.si); \
} \
/* smallest item >= @item under cmpfn_nuq, or NULL */ \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
/* largest item < @item under cmpfn_nuq, or NULL */ \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* unlink @item; returns it on success, NULL if it was not found */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh, \
			&item->field.si, cmpfn_uq); \
	return container_of_null(sitem, type, field.si); \
} \
/* remove & return the first (lowest) item - priority queue usage */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
	return container_of_null(sitem, type, field.si); \
} \
/* exchange the full contents of two heads.  The tagged overflow \
 * pointers are self-referential, so after the raw struct swap each \
 * one must be re-pointed at its own head's overflow[] array. */ \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)a->sh.overflow | 1); \
	b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)b->sh.overflow | 1); \
} \
/* first item = level-0 successor of the sentinel head */ \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct sskip_item *first = h->sh.hitem.next[0]; \
	return container_of_null(first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct sskip_item *next = item->field.si.next[0]; \
	return container_of_null(next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
/* like _next(), but tolerates item == NULL (returns NULL then) */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next; \
	next = item ? item->field.si.next[0] : NULL; \
	return container_of_null(next, type, field.si); \
} \
/* number of items (O(1), tracked in the head) */ \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
1079 :
#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* skiplist where cmpfn defines a unique key: the same comparator is
 * used for lookups and for add/del, and an exact-match _find() is
 * provided. */
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
\
/* adapt the user's typed comparator to the generic sskip_item level */ \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
/* exact-match lookup (meaningful because keys are unique) */ \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
			&item->field.si, &prefix ## __cmp); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
\
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp); \
MACRO_REQUIRE_SEMICOLON() /* end */
1103 :
#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* skiplist that allows multiple items comparing equal under cmpfn.
 * Lookups use cmpfn as-is; add/del use a wrapped comparator that
 * breaks ties by item address, so one specific duplicate can be
 * targeted for removal. */
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
\
/* adapt the user's typed comparator to the generic sskip_item level */ \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
	if (cmpval) \
		return cmpval; \
	/* tiebreak equal items by address.  Go through uintptr_t: a \
	 * relational compare of pointers into distinct objects is \
	 * undefined behavior in ISO C (6.5.8), while the integer \
	 * comparison is well-defined and yields a consistent order. */ \
	if ((uintptr_t)a < (uintptr_t)b) \
		return -1; \
	if ((uintptr_t)a > (uintptr_t)b) \
		return 1; \
	return 0; \
} \
\
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
1132 :
1133 :
/* out-of-line skiplist core shared by all instantiations above —
 * presumably implemented in typesafe.c (not visible from this header).
 * Each function operates on the generic embedded sskip_head /
 * sskip_item structs, with the comparator supplied by the DECLARE_*
 * wrappers. */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
1158 :
1159 : #ifdef __cplusplus
1160 : }
1161 : #endif
1162 :
1163 : /* this needs to stay at the end because both files include each other.
1164 : * the resolved order is typesafe.h before typerb.h
1165 : */
1166 : #include "typerb.h"
1167 :
1168 : #endif /* _FRR_TYPESAFE_H */
|