Line data Source code
1 : /*
2 : * Addpath TX ID selection, and related utilities
3 : * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
4 : *
5 : * This program is free software; you can redistribute it and/or modify it
6 : * under the terms of the GNU General Public License as published by the Free
7 : * Software Foundation; either version 2 of the License, or (at your option)
8 : * any later version.
9 : *
10 : * This program is distributed in the hope that it will be useful, but WITHOUT
11 : * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 : * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 : * more details.
14 : *
15 : * You should have received a copy of the GNU General Public License along
16 : * with this program; see the file COPYING; if not, write to the Free Software
17 : * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 : */
19 :
20 : #ifdef HAVE_CONFIG_H
21 : #include "config.h"
22 : #endif
23 :
24 : #include "bgp_addpath.h"
25 : #include "bgp_route.h"
26 :
/*
 * Display/config strings for each addpath strategy, indexed by
 * enum bgp_addpath_strat. Entry order must match the enum; only real
 * strategies (values below BGP_ADDPATH_MAX) have entries here — anything
 * else falls back to unknown_names via bgp_addpath_names().
 */
static const struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = {
	{
		.config_name = "addpath-tx-all-paths",
		.human_name = "All",
		.human_description = "Advertise all paths via addpath",
		.type_json_name = "addpathTxAllPaths",
		.id_json_name = "addpathTxIdAll"
	},
	{
		.config_name = "addpath-tx-bestpath-per-AS",
		.human_name = "Best-Per-AS",
		.human_description = "Advertise bestpath per AS via addpath",
		.type_json_name = "addpathTxBestpathPerAS",
		.id_json_name = "addpathTxIdBestPerAS"
	}
};
43 :
/* Fallback strings returned for any strategy value outside the known
 * range (e.g. BGP_ADDPATH_NONE) so callers never see a NULL table.
 */
static const struct bgp_addpath_strategy_names unknown_names = {
	.config_name = "addpath-tx-unknown",
	.human_name = "Unknown-Addpath-Strategy",
	.human_description = "Unknown Addpath Strategy",
	.type_json_name = "addpathTxUnknown",
	.id_json_name = "addpathTxIdUnknown"
};
51 :
52 : /*
53 : * Returns a structure full of strings associated with an addpath type. Will
54 : * never return null.
55 : */
56 : const struct bgp_addpath_strategy_names *
57 0 : bgp_addpath_names(enum bgp_addpath_strat strat)
58 : {
59 0 : if (strat < BGP_ADDPATH_MAX)
60 0 : return &(strat_names[strat]);
61 : else
62 : return &unknown_names;
63 : };
64 :
65 : /*
66 : * Returns if any peer is transmitting addpaths for a given afi/safi.
67 : */
68 0 : bool bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi,
69 : safi_t safi)
70 : {
71 0 : return d->total_peercount[afi][safi] > 0;
72 : }
73 :
74 : /*
75 : * Initialize the BGP instance level data for addpath.
76 : */
77 2 : void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d)
78 : {
79 2 : safi_t safi;
80 2 : afi_t afi;
81 2 : int i;
82 :
83 50 : FOREACH_AFI_SAFI (afi, safi) {
84 126 : for (i = 0; i < BGP_ADDPATH_MAX; i++) {
85 84 : d->id_allocators[afi][safi][i] = NULL;
86 84 : d->peercount[afi][safi][i] = 0;
87 : }
88 42 : d->total_peercount[afi][safi] = 0;
89 : }
90 2 : }
91 :
92 : /*
93 : * Free up resources associated with BGP route info structures.
94 : */
95 0 : void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
96 : struct bgp_addpath_node_data *nd)
97 : {
98 0 : int i;
99 :
100 0 : for (i = 0; i < BGP_ADDPATH_MAX; i++) {
101 0 : if (d->addpath_tx_id[i] != IDALLOC_INVALID)
102 0 : idalloc_free_to_pool(&nd->free_ids[i],
103 : d->addpath_tx_id[i]);
104 : }
105 0 : }
106 :
107 : /*
108 : * Return the addpath ID used to send a particular route, to a particular peer,
109 : * in a particular AFI/SAFI.
110 : */
111 0 : uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
112 : struct bgp_addpath_info_data *d)
113 : {
114 0 : if (safi == SAFI_LABELED_UNICAST)
115 0 : safi = SAFI_UNICAST;
116 :
117 0 : if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX)
118 0 : return d->addpath_tx_id[peer->addpath_type[afi][safi]];
119 : else
120 : return IDALLOC_INVALID;
121 : }
122 :
123 : /*
124 : * Returns true if the path has an assigned addpath ID for any of the addpath
125 : * strategies.
126 : */
127 0 : bool bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d)
128 : {
129 0 : int i;
130 :
131 0 : for (i = 0; i < BGP_ADDPATH_MAX; i++)
132 0 : if (d->addpath_tx_id[i] != 0)
133 : return true;
134 :
135 : return false;
136 : }
137 :
138 : /*
139 : * Releases any ID's associated with the BGP prefix.
140 : */
141 16 : void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd,
142 : struct bgp_addpath_node_data *nd, afi_t afi,
143 : safi_t safi)
144 : {
145 16 : int i;
146 :
147 48 : for (i = 0; i < BGP_ADDPATH_MAX; i++) {
148 32 : idalloc_drain_pool(bd->id_allocators[afi][safi][i],
149 : &(nd->free_ids[i]));
150 : }
151 16 : }
152 :
153 : /*
154 : * Check to see if the addpath strategy requires DMED to be configured to work.
155 : */
156 0 : bool bgp_addpath_dmed_required(int strategy)
157 : {
158 0 : return strategy == BGP_ADDPATH_BEST_PER_AS;
159 : }
160 :
161 : /*
162 : * Return true if this is a path we should advertise due to a
163 : * configured addpath-tx knob
164 : */
165 0 : bool bgp_addpath_tx_path(enum bgp_addpath_strat strat, struct bgp_path_info *pi)
166 : {
167 0 : switch (strat) {
168 : case BGP_ADDPATH_NONE:
169 : return false;
170 : case BGP_ADDPATH_ALL:
171 : return true;
172 0 : case BGP_ADDPATH_BEST_PER_AS:
173 0 : if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))
174 : return true;
175 : else
176 : return false;
177 : case BGP_ADDPATH_MAX:
178 : return false;
179 : }
180 :
181 0 : assert(!"Reached end of function we should never hit");
182 : }
183 :
/*
 * Release every addpath TX ID held at one route node for the given
 * strategy: IDs parked in the node's free pool and IDs assigned to each
 * path, all returned to the instance-level allocator.
 */
static void bgp_addpath_flush_type_rn(struct bgp *bgp, afi_t afi, safi_t safi,
				      enum bgp_addpath_strat addpath_type,
				      struct bgp_dest *dest)
{
	struct bgp_path_info *pi;

	/* Labeled-unicast shares addpath state with plain unicast. */
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	/* First return the node's pooled (unassigned) IDs. */
	idalloc_drain_pool(
		bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
		&(dest->tx_addpath.free_ids[addpath_type]));
	/* Then free each path's assigned ID and mark it unassigned. */
	for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
		if (pi->tx_addpath.addpath_tx_id[addpath_type]
		    != IDALLOC_INVALID) {
			idalloc_free(
				bgp->tx_addpath
					.id_allocators[afi][safi][addpath_type],
				pi->tx_addpath.addpath_tx_id[addpath_type]);
			pi->tx_addpath.addpath_tx_id[addpath_type] =
				IDALLOC_INVALID;
		}
	}
}
208 :
/*
 * Purge all addpath ID's on a BGP instance associated with the addpath
 * strategy, and afi/safi combination. This lets us let go of all memory held to
 * track ID numbers associated with an addpath type not in use. Since
 * post-bestpath ID processing is skipped for types not used, this is the only
 * chance to free this data.
 */
static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi,
				   enum bgp_addpath_strat addpath_type)
{
	struct bgp_dest *dest, *ndest;

	/* Labeled-unicast state lives in the unicast SAFI. */
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
	     dest = bgp_route_next(dest)) {
		if (safi == SAFI_MPLS_VPN) {
			/* VPN RIBs are two-level: each top-level node holds
			 * a per-RD subtable; flush every node within it.
			 */
			struct bgp_table *table;

			table = bgp_dest_get_bgp_table_info(dest);
			if (!table)
				continue;

			for (ndest = bgp_table_top(table); ndest;
			     ndest = bgp_route_next(ndest))
				bgp_addpath_flush_type_rn(bgp, afi, safi,
							  addpath_type, ndest);
		} else {
			bgp_addpath_flush_type_rn(bgp, afi, safi, addpath_type,
						  dest);
		}
	}

	/* All IDs have been returned; tear down the allocator itself. */
	idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]);
	bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL;
}
246 :
247 : /*
248 : * Allocate an Addpath ID for the given type on a path, if necessary.
249 : */
250 0 : static void bgp_addpath_populate_path(struct id_alloc *allocator,
251 : struct bgp_path_info *path,
252 : enum bgp_addpath_strat addpath_type)
253 : {
254 0 : if (bgp_addpath_tx_path(addpath_type, path)) {
255 0 : path->tx_addpath.addpath_tx_id[addpath_type] =
256 0 : idalloc_allocate(allocator);
257 : }
258 0 : }
259 :
260 : /*
261 : * Compute addpath ID's on a BGP instance associated with the addpath strategy,
262 : * and afi/safi combination. Since we won't waste the time computing addpath IDs
263 : * for unused strategies, the first time a peer is configured to use a strategy,
264 : * we have to backfill the data.
265 : * In labeled-unicast, addpath allocations SHOULD be done in unicast SAFI.
266 : */
267 0 : static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
268 : enum bgp_addpath_strat addpath_type)
269 : {
270 0 : struct bgp_dest *dest, *ndest;
271 0 : char buf[200];
272 0 : struct id_alloc *allocator;
273 :
274 0 : if (safi == SAFI_LABELED_UNICAST)
275 0 : safi = SAFI_UNICAST;
276 :
277 0 : snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d",
278 0 : bgp_addpath_names(addpath_type)->config_name, (int)afi,
279 : (int)safi);
280 0 : buf[sizeof(buf) - 1] = '\0';
281 0 : zlog_info("Computing addpath IDs for addpath type %s",
282 : bgp_addpath_names(addpath_type)->human_name);
283 :
284 0 : bgp->tx_addpath.id_allocators[afi][safi][addpath_type] =
285 0 : idalloc_new(buf);
286 :
287 0 : idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
288 : BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
289 :
290 0 : allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type];
291 :
292 0 : for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
293 0 : dest = bgp_route_next(dest)) {
294 0 : struct bgp_path_info *bi;
295 :
296 0 : if (safi == SAFI_MPLS_VPN) {
297 0 : struct bgp_table *table;
298 :
299 0 : table = bgp_dest_get_bgp_table_info(dest);
300 0 : if (!table)
301 0 : continue;
302 :
303 0 : for (ndest = bgp_table_top(table); ndest;
304 0 : ndest = bgp_route_next(ndest))
305 0 : for (bi = bgp_dest_get_bgp_path_info(ndest); bi;
306 0 : bi = bi->next)
307 0 : bgp_addpath_populate_path(allocator, bi,
308 : addpath_type);
309 : } else {
310 0 : for (bi = bgp_dest_get_bgp_path_info(dest); bi;
311 0 : bi = bi->next)
312 0 : bgp_addpath_populate_path(allocator, bi,
313 : addpath_type);
314 : }
315 : }
316 0 : }
317 :
/*
 * Handle updates to a peer or group's addpath strategy. If after adjusting
 * counts a addpath strategy is in use for the first time, or no longer in use,
 * the IDs for that strategy will be populated or flushed.
 */
void bgp_addpath_type_changed(struct bgp *bgp)
{
	afi_t afi;
	safi_t safi;
	struct listnode *node, *nnode;
	struct peer *peer;
	int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
	enum bgp_addpath_strat type;

	/* Zero the scratch counts and the instance-wide totals before
	 * recounting from scratch.
	 */
	FOREACH_AFI_SAFI(afi, safi) {
		for (type=0; type<BGP_ADDPATH_MAX; type++) {
			peer_count[afi][safi][type] = 0;
		}
		bgp->tx_addpath.total_peercount[afi][safi] = 0;
	}

	/* Tally every peer's configured strategy per afi/safi. */
	for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
		FOREACH_AFI_SAFI(afi, safi) {
			type = peer->addpath_type[afi][safi];
			if (type != BGP_ADDPATH_NONE) {
				peer_count[afi][safi][type] += 1;
				bgp->tx_addpath.total_peercount[afi][safi] += 1;
			}
		}
	}

	/* Store the new counts; populate IDs on a 0 -> nonzero transition,
	 * flush them (freeing the allocator) on nonzero -> 0.
	 */
	FOREACH_AFI_SAFI(afi, safi) {
		for (type=0; type<BGP_ADDPATH_MAX; type++) {
			int old = bgp->tx_addpath.peercount[afi][safi][type];
			int new = peer_count[afi][safi][type];

			bgp->tx_addpath.peercount[afi][safi][type] = new;

			if (old == 0 && new != 0) {
				bgp_addpath_populate_type(bgp, afi, safi,
							  type);
			} else if (old != 0 && new == 0) {
				bgp_addpath_flush_type(bgp, afi, safi, type);
			}
		}
	}
}
365 :
/*
 * Change the addpath type assigned to a peer, or peer group. In addition to
 * adjusting the counts, peer sessions will be reset as needed to make the
 * change take effect.
 */
void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
			       enum bgp_addpath_strat addpath_type)
{
	struct bgp *bgp = peer->bgp;
	enum bgp_addpath_strat old_type;
	struct listnode *node, *nnode;
	struct peer *tmp_peer;
	struct peer_group *group;

	/* Labeled-unicast shares addpath state with plain unicast. */
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	old_type = peer->addpath_type[afi][safi];
	if (addpath_type == old_type)
		return;

	if (addpath_type == BGP_ADDPATH_NONE && peer->group &&
	    !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		/* A "no" config on a group member inherits group */
		addpath_type = peer->group->conf->addpath_type[afi][safi];
	}

	peer->addpath_type[afi][safi] = addpath_type;

	/* Recount strategy usage; may populate or flush ID allocators. */
	bgp_addpath_type_changed(bgp);

	if (addpath_type != BGP_ADDPATH_NONE) {
		if (bgp_addpath_dmed_required(addpath_type)) {
			/* best-per-AS needs deterministic MED; force it on
			 * and recompute all bestpaths if it wasn't set.
			 */
			if (!CHECK_FLAG(bgp->flags,
					BGP_FLAG_DETERMINISTIC_MED)) {
				zlog_warn(
					"%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
					peer->host);
				SET_FLAG(bgp->flags,
					 BGP_FLAG_DETERMINISTIC_MED);
				bgp_recalculate_all_bestpaths(bgp);
			}
		}
	}

	zlog_info("Resetting peer %s%pBP due to change in addpath config",
		  CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "",
		  peer);

	if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		group = peer->group;

		/* group will be null as peer_group_delete calls peer_delete on
		 * group->conf. That peer_delete will eventuallly end up here
		 * if the group was configured to tx addpaths.
		 */
		if (group != NULL) {
			/* Recurse into members still carrying the group's old
			 * type so they pick up the new setting (and reset).
			 */
			for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
					       tmp_peer)) {
				if (tmp_peer->addpath_type[afi][safi] ==
				    old_type) {
					bgp_addpath_set_peer_type(tmp_peer,
								  afi,
								  safi,
								  addpath_type);
				}
			}
		}
	} else {
		/* Individual peer: bounce the session for the new config. */
		peer_change_action(peer, afi, safi, peer_change_reset);
	}

}
439 :
/*
 * Intended to run after bestpath. This function will take TX IDs from paths
 * that no longer need them, and give them to paths that do. This prevents
 * best-per-as updates from needing to do a separate withdraw and update just to
 * swap out which path is sent.
 */
void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_dest *bn, afi_t afi,
			    safi_t safi)
{
	int i;
	struct bgp_path_info *pi;
	struct id_alloc_pool **pool_ptr;

	/* Labeled-unicast IDs are kept in the unicast SAFI. */
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
		struct id_alloc *alloc =
			bgp->tx_addpath.id_allocators[afi][safi][i];
		pool_ptr = &(bn->tx_addpath.free_ids[i]);

		/* Skip strategies with no using peers (no allocator kept). */
		if (bgp->tx_addpath.peercount[afi][safi][i] == 0)
			continue;

		/* Free Unused IDs back to the pool.*/
		for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID
			    && !bgp_addpath_tx_path(i, pi)) {
				idalloc_free_to_pool(pool_ptr,
					pi->tx_addpath.addpath_tx_id[i]);
				pi->tx_addpath.addpath_tx_id[i] =
					IDALLOC_INVALID;
			}
		}

		/* Give IDs to paths that need them (pulling from the pool) */
		for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID
			    && bgp_addpath_tx_path(i, pi)) {
				pi->tx_addpath.addpath_tx_id[i] =
					idalloc_allocate_prefer_pool(
						alloc, pool_ptr);
			}
		}

		/* Free any IDs left in the pool to the main allocator */
		idalloc_drain_pool(alloc, pool_ptr);
	}
}
|