Line data Source code
1 : /*
2 : * PIM for Quagga
3 : * Copyright (C) 2015 Cumulus Networks, Inc.
4 : * Donald Sharp
5 : *
6 : * This program is free software; you can redistribute it and/or modify
7 : * it under the terms of the GNU General Public License as published by
8 : * the Free Software Foundation; either version 2 of the License, or
9 : * (at your option) any later version.
10 : *
11 : * This program is distributed in the hope that it will be useful, but
12 : * WITHOUT ANY WARRANTY; without even the implied warranty of
13 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 : * General Public License for more details.
15 : *
16 : * You should have received a copy of the GNU General Public License along
17 : * with this program; see the file COPYING; if not, write to the Free Software
18 : * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 : */
20 : #include <zebra.h>
21 :
22 : #include "lib/json.h"
23 : #include "log.h"
24 : #include "network.h"
25 : #include "if.h"
26 : #include "linklist.h"
27 : #include "prefix.h"
28 : #include "memory.h"
29 : #include "vty.h"
30 : #include "vrf.h"
31 : #include "plist.h"
32 : #include "nexthop.h"
33 : #include "table.h"
34 : #include "lib_errors.h"
35 :
36 : #include "pimd.h"
37 : #include "pim_instance.h"
38 : #include "pim_vty.h"
39 : #include "pim_str.h"
40 : #include "pim_iface.h"
41 : #include "pim_rp.h"
42 : #include "pim_rpf.h"
43 : #include "pim_sock.h"
44 : #include "pim_memory.h"
45 : #include "pim_neighbor.h"
46 : #include "pim_msdp.h"
47 : #include "pim_nht.h"
48 : #include "pim_mroute.h"
49 : #include "pim_oil.h"
50 : #include "pim_zebra.h"
51 : #include "pim_bsm.h"
52 : #include "pim_util.h"
53 : #include "pim_ssm.h"
54 : #include "termtable.h"
55 :
56 : /* Cleanup pim->rpf_hash each node data */
57 1 : void pim_rp_list_hash_clean(void *data)
58 : {
59 1 : struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
60 :
61 1 : list_delete(&pnc->rp_list);
62 :
63 1 : hash_clean(pnc->upstream_hash, NULL);
64 1 : hash_free(pnc->upstream_hash);
65 1 : pnc->upstream_hash = NULL;
66 1 : if (pnc->nexthop)
67 0 : nexthops_free(pnc->nexthop);
68 :
69 1 : XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
70 1 : }
71 :
72 4 : static void pim_rp_info_free(struct rp_info *rp_info)
73 : {
74 4 : XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
75 :
76 4 : XFREE(MTYPE_PIM_RP, rp_info);
77 4 : }
78 :
79 0 : int pim_rp_list_cmp(void *v1, void *v2)
80 : {
81 0 : struct rp_info *rp1 = (struct rp_info *)v1;
82 0 : struct rp_info *rp2 = (struct rp_info *)v2;
83 0 : int ret;
84 :
85 : /*
86 : * Sort by RP IP address
87 : */
88 0 : ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
89 0 : if (ret)
90 : return ret;
91 :
92 : /*
93 : * Sort by group IP address
94 : */
95 0 : ret = prefix_cmp(&rp1->group, &rp2->group);
96 0 : if (ret)
97 : return ret;
98 :
99 : return 0;
100 : }
101 :
102 4 : void pim_rp_init(struct pim_instance *pim)
103 : {
104 4 : struct rp_info *rp_info;
105 4 : struct route_node *rn;
106 :
107 4 : pim->rp_list = list_new();
108 4 : pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
109 4 : pim->rp_list->cmp = pim_rp_list_cmp;
110 :
111 4 : pim->rp_table = route_table_init();
112 :
113 4 : rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
114 :
115 4 : if (!pim_get_all_mcast_group(&rp_info->group)) {
116 0 : flog_err(EC_LIB_DEVELOPMENT,
117 : "Unable to convert all-multicast prefix");
118 0 : list_delete(&pim->rp_list);
119 0 : route_table_finish(pim->rp_table);
120 0 : XFREE(MTYPE_PIM_RP, rp_info);
121 0 : return;
122 : }
123 4 : rp_info->rp.rpf_addr = PIMADDR_ANY;
124 :
125 4 : listnode_add(pim->rp_list, rp_info);
126 :
127 4 : rn = route_node_get(pim->rp_table, &rp_info->group);
128 4 : rn->info = rp_info;
129 4 : if (PIM_DEBUG_PIM_TRACE)
130 0 : zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
131 : rp_info, &rp_info->group,
132 : route_node_get_lock_count(rn));
133 : }
134 :
135 4 : void pim_rp_free(struct pim_instance *pim)
136 : {
137 4 : if (pim->rp_table)
138 4 : route_table_finish(pim->rp_table);
139 4 : pim->rp_table = NULL;
140 :
141 4 : if (pim->rp_list)
142 4 : list_delete(&pim->rp_list);
143 4 : }
144 :
145 : /*
146 : * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
147 : */
148 0 : static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
149 : pim_addr rp, const char *plist)
150 : {
151 0 : struct listnode *node;
152 0 : struct rp_info *rp_info;
153 :
154 0 : for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
155 0 : if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
156 0 : rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
157 0 : return rp_info;
158 : }
159 : }
160 :
161 : return NULL;
162 : }
163 :
164 : /*
165 : * Return true if plist is used by any rp_info
166 : */
167 0 : static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
168 : {
169 0 : struct listnode *node;
170 0 : struct rp_info *rp_info;
171 :
172 0 : for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
173 0 : if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
174 : return 1;
175 : }
176 : }
177 :
178 : return 0;
179 : }
180 :
181 : /*
182 : * Given an RP's address, return the RP's rp_info that is an exact match for
183 : * 'group'
184 : */
185 0 : static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
186 : const struct prefix *group)
187 : {
188 0 : struct listnode *node;
189 0 : struct rp_info *rp_info;
190 :
191 0 : for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
192 0 : if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
193 0 : prefix_same(&rp_info->group, group))
194 0 : return rp_info;
195 : }
196 :
197 : return NULL;
198 : }
199 :
200 : /*
201 : * XXX: long-term issue: we don't actually have a good "ip address-list"
202 : * implementation. ("access-list XYZ" is the closest but honestly it's
203 : * kinda garbage.)
204 : *
205 : * So it's using a prefix-list to match an address here, which causes very
206 : * unexpected results for the user since prefix-lists by default only match
207 : * when the prefix length is an exact match too. i.e. you'd have to add the
208 : * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
209 : *
210 : * To avoid this pitfall, this code uses "address_mode = true" for the prefix
211 : * list match (this is the only user for that.)
212 : *
213 : * In the long run, we need to add a "ip address-list", but that's a wholly
214 : * separate bag of worms, and existing configs using ip prefix-list would
215 : * drop into the UX pitfall.
216 : */
217 :
218 : #include "lib/plist_int.h"
219 :
220 : /*
221 : * Given a group, return the rp_info for that group
222 : */
223 12 : struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
224 : const struct prefix *group)
225 : {
226 12 : struct listnode *node;
227 12 : struct rp_info *best = NULL;
228 12 : struct rp_info *rp_info;
229 12 : struct prefix_list *plist;
230 12 : const struct prefix *bp;
231 12 : const struct prefix_list_entry *entry;
232 12 : struct route_node *rn;
233 :
234 12 : bp = NULL;
235 36 : for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
236 12 : if (rp_info->plist) {
237 0 : plist = prefix_list_lookup(PIM_AFI, rp_info->plist);
238 :
239 0 : if (prefix_list_apply_ext(plist, &entry, group, true)
240 0 : == PREFIX_DENY || !entry)
241 0 : continue;
242 :
243 0 : if (!best) {
244 0 : best = rp_info;
245 0 : bp = &entry->prefix;
246 0 : continue;
247 : }
248 :
249 0 : if (bp && bp->prefixlen < entry->prefix.prefixlen) {
250 0 : best = rp_info;
251 0 : bp = &entry->prefix;
252 : }
253 : }
254 : }
255 :
256 12 : rn = route_node_match(pim->rp_table, group);
257 12 : if (!rn) {
258 0 : flog_err(
259 : EC_LIB_DEVELOPMENT,
260 : "%s: BUG We should have found default group information",
261 : __func__);
262 0 : return best;
263 : }
264 :
265 12 : rp_info = rn->info;
266 12 : if (PIM_DEBUG_PIM_TRACE) {
267 0 : if (best)
268 0 : zlog_debug(
269 : "Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
270 : group, best->plist, rn, &rp_info->group);
271 : else
272 0 : zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group,
273 : rn, &rp_info->group);
274 : }
275 :
276 12 : route_unlock_node(rn);
277 :
278 : /*
279 : * rp's with prefix lists have the group as 224.0.0.0/4 which will
280 : * match anything. So if we have a rp_info that should match a prefix
281 : * list then if we do match then best should be the answer( even
282 : * if it is NULL )
283 : */
284 12 : if (!rp_info || (rp_info && rp_info->plist))
285 : return best;
286 :
287 : /*
288 : * So we have a non plist rp_info found in the lookup and no plists
289 : * at all to be choosen, return it!
290 : */
291 12 : if (!best)
292 : return rp_info;
293 :
294 : /*
295 : * If we have a matching non prefix list and a matching prefix
296 : * list we should return the actual rp_info that has the LPM
297 : * If they are equal, use the prefix-list( but let's hope
298 : * the end-operator doesn't do this )
299 : */
300 0 : if (rp_info->group.prefixlen > bp->prefixlen)
301 12 : best = rp_info;
302 :
303 : return best;
304 : }
305 :
306 : /*
307 : * When the user makes "ip pim rp" configuration changes or if they change the
308 : * prefix-list(s) used by these statements we must tickle the upstream state
309 : * for each group to make them re-lookup who their RP should be.
*
* It notifies MSDP that our RP role may have changed and re-evaluates
* whether upstream entries should use the shared (RPT) path.
312 : */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	/* Let MSDP re-evaluate our RP role (it may have changed) */
	pim_msdp_i_am_rp_changed(pim);
	/* Re-evaluate RPT usage across all upstream entries */
	pim_upstream_reeval_use_rpt(pim);
}
318 :
319 0 : void pim_rp_prefix_list_update(struct pim_instance *pim,
320 : struct prefix_list *plist)
321 : {
322 0 : struct listnode *node;
323 0 : struct rp_info *rp_info;
324 0 : int refresh_needed = 0;
325 :
326 0 : for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
327 0 : if (rp_info->plist
328 0 : && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
329 : refresh_needed = 1;
330 : break;
331 : }
332 : }
333 :
334 0 : if (refresh_needed)
335 0 : pim_rp_refresh_group_to_rp_mapping(pim);
336 0 : }
337 :
338 0 : static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
339 : struct pim_interface *pim_ifp)
340 : {
341 0 : struct listnode *node;
342 0 : struct pim_secondary_addr *sec_addr;
343 0 : pim_addr sec_paddr;
344 :
345 0 : if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
346 : return 1;
347 :
348 0 : if (!pim_ifp->sec_addr_list) {
349 : return 0;
350 : }
351 :
352 0 : for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
353 0 : sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
354 : /* If an RP-address is self, It should be enough to say
355 : * I am RP the prefix-length should not matter here */
356 0 : if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
357 : return 1;
358 : }
359 :
360 : return 0;
361 : }
362 :
363 0 : static void pim_rp_check_interfaces(struct pim_instance *pim,
364 : struct rp_info *rp_info)
365 : {
366 0 : struct interface *ifp;
367 :
368 0 : rp_info->i_am_rp = 0;
369 0 : FOR_ALL_INTERFACES (pim->vrf, ifp) {
370 0 : struct pim_interface *pim_ifp = ifp->info;
371 :
372 0 : if (!pim_ifp)
373 0 : continue;
374 :
375 0 : if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
376 0 : rp_info->i_am_rp = 1;
377 : }
378 : }
379 0 : }
380 :
/*
 * Re-resolve the upstream address of `up` after an RP mapping change and,
 * if it changed, move NHT tracking and recompute the RPF state.
 */
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
{
	struct pim_rpf old_rpf;
	enum pim_rpf_result rpf_result;
	pim_addr old_upstream_addr;
	pim_addr new_upstream_addr;

	old_upstream_addr = up->upstream_addr;
	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
				 up->sg.grp);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: pim upstream update for old upstream %pPA",
			   __func__, &old_upstream_addr);

	/* Nothing to do if the resolved upstream address did not change */
	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
		return;

	/* Lets consider a case, where a PIM upstream has a better RP as a
	 * result of a new RP configuration with more precise group range.
	 * This upstream has to be added to the upstream hash of new RP's
	 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
	 */
	if (!pim_addr_is_any(old_upstream_addr)) {
		/* Deregister addr with Zebra NHT */
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: Deregister upstream %s addr %pPA with Zebra NHT",
				__func__, up->sg_str, &old_upstream_addr);
		pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
	}

	/* Update the upstream address */
	up->upstream_addr = new_upstream_addr;

	/* Remember the old RPF interface so a change can be reported below */
	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;

	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
	if (rpf_result == PIM_RPF_FAILURE)
		pim_mroute_del(up->channel_oil, __func__);

	/* update kernel multicast forwarding cache (MFC) */
	if (up->rpf.source_nexthop.interface && up->channel_oil)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	/* Notify on a changed RPF, or on a lost RPF where one existed */
	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE &&
	     old_rpf.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);

}
432 :
/*
 * Install a new RP mapping, either for an explicit group prefix or for a
 * prefix-list (mutually exclusive). Handles replacing/merging with any
 * existing entries for the same RP, takes over the catch-all entry when
 * appropriate, updates affected (*,G) upstream state, and registers the
 * RP address with Zebra NHT.
 *
 * Returns PIM_SUCCESS or one of the PIM_RP_*/PIM_GROUP_* error codes.
 */
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	char buffer[BUFSIZ];
	pim_addr nht_p;
	struct route_node *rn = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;

	if (pim_addr_is_any(rp_addr))
		return PIM_RP_BAD_ADDRESS;

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	rp_info->rp.rpf_addr = rp_addr;
	prefix_copy(&rp_info->group, &group);
	rp_info->rp_src = rp_src_flag;

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for an RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (!pim_addr_cmp(rp_info->rp.rpf_addr,
					  tmp_rp_info->rp.rpf_addr)) {
				if (tmp_rp_info->plist)
					pim_rp_del_config(pim, rp_addr, NULL,
							  tmp_rp_info->plist);
				else
					pim_rp_del_config(
						pim, rp_addr,
						prefix2str(&tmp_rp_info->group,
							   buffer, BUFSIZ),
						NULL);
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {

		if (!pim_get_all_mcast_group(&group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist &&
			    (!pim_addr_cmp(rp_info->rp.rpf_addr,
					   tmp_rp_info->rp.rpf_addr))) {
				pim_rp_del_config(pim, rp_addr, NULL,
						  tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
		 */
		if (prefix_same(&rp_all->group, &rp_info->group) &&
		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
			/* Reuse the existing catch-all entry instead of
			 * inserting a duplicate; the new rp_info is freed. */
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			rp_all->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p = rp_all->rp.rpf_addr;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: NHT Register rp_all addr %pPA grp %pFX ",
					__func__, &nht_p, &rp_all->group);

			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
				/* Find (*, G) upstream whose RP is not
				 * configured yet
				 */
				if (pim_addr_is_any(up->upstream_addr) &&
				    pim_addr_is_any(up->sg.src)) {
					struct prefix grp;
					struct rp_info *trp_info;

					pim_addr_to_prefix(&grp, up->sg.grp);
					trp_info = pim_rp_find_match_group(
						pim, &grp);
					if (trp_info == rp_all) {
						pim_upstream_update(pim, up);
						upstream_updated = true;
					}
				}
			}
			if (upstream_updated)
				pim_zebra_update_all_interfaces(pim);

			pim_rp_check_interfaces(pim, rp_all);
			pim_rp_refresh_group_to_rp_mapping(pim);
			pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
						  NULL);

			if (!pim_ecmp_nexthop_lookup(pim,
						     &rp_all->rp.source_nexthop,
						     nht_p, &rp_all->group, 1))
				return PIM_RP_NO_PATH;
			return PIM_SUCCESS;
		}

		/*
		 * Return if the group is already configured for this RP
		 */
		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
		if (tmp_rp_info) {
			/* A static config may upgrade the source of an
			 * existing (e.g. BSR-learned) entry */
			if ((tmp_rp_info->rp_src != rp_src_flag)
			    && (rp_src_flag == RP_SRC_STATIC))
				tmp_rp_info->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);
			return result;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);

		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP that covers this group is an
				 * RP configured for
				 * 224.0.0.0/4 that is fine, ignore that one.
				 * For all others
				 * though we must return PIM_GROUP_OVERLAP
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					if ((rp_src_flag == RP_SRC_STATIC)
					    && (tmp_rp_info->rp_src
						== RP_SRC_STATIC)) {
						XFREE(MTYPE_PIM_RP, rp_info);
						return PIM_GROUP_OVERLAP;
					}

					/* Same group, different source:
					 * replace the RP in place */
					result = pim_rp_change(
						pim, rp_addr,
						tmp_rp_info->group,
						rp_src_flag);
					XFREE(MTYPE_PIM_RP, rp_info);
					return result;
				}
			}
		}
	}

	/* Insert the new entry; ownership passes to pim->rp_list */
	listnode_add_sort(pim->rp_list, rp_info);

	/* Prefix-list entries are matched by scanning, not via the LPM
	 * table, so only group-based entries get a route node. */
	if (!rp_info->plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		rn->info = rp_info;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   rn ? route_node_get_lock_count(rn) : 0);

	/* Re-home any (*,G) upstream now covered by this new RP */
	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	pim_rp_check_interfaces(pim, rp_info);
	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Register addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);
	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1))
		return PIM_RP_NO_PATH;

	return PIM_SUCCESS;
}
666 :
667 0 : void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
668 : const char *group_range, const char *plist)
669 : {
670 0 : struct prefix group;
671 0 : int result;
672 :
673 0 : if (group_range == NULL)
674 0 : result = pim_get_all_mcast_group(&group);
675 : else
676 0 : result = str2prefix(group_range, &group);
677 :
678 0 : if (!result) {
679 0 : if (PIM_DEBUG_PIM_TRACE)
680 0 : zlog_debug(
681 : "%s: String to prefix failed for %pPAs group",
682 : __func__, &rp_addr);
683 0 : return;
684 : }
685 :
686 0 : pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
687 : }
688 :
/*
 * Remove an RP mapping (by group or by prefix-list). If a static RP is
 * deleted and a BSR-learned RP exists for the same group, fall back to it.
 * The catch-all entry is never removed, only reset to PIMADDR_ANY.
 * Affected (*,G) upstream entries are re-resolved or cleared.
 */
int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	pim_addr nht_p;
	struct route_node *rn;
	bool was_plist = false;
	struct rp_info *trp_info;
	struct pim_upstream *up;
	struct bsgrp_node *bsgrp = NULL;
	struct bsm_rpinfo *bsrp = NULL;
	bool upstream_updated = false;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	/* Free the plist name now; remember it existed so we skip the
	 * route-table cleanup below (plist entries have no route node). */
	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		was_plist = true;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
			   &rp_addr, &group);

	/* While static RP is getting deleted, we need to check if dynamic RP
	 * present for the same group in BSM RP table, then install the dynamic
	 * RP for the group node into the main rp table
	 */
	if (rp_src_flag == RP_SRC_STATIC) {
		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);

		if (bsgrp) {
			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
			if (bsrp) {
				if (PIM_DEBUG_PIM_TRACE)
					zlog_debug(
						"%s: BSM RP %pPA found for the group %pFX",
						__func__, &bsrp->rp_address,
						&group);
				/* Reuses the entry in place; rp_info stays
				 * on the list */
				return pim_rp_change(pim, bsrp->rp_address,
						     group, RP_SRC_BSR);
			}
		} else {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: BSM RP not found for the group %pFX",
					__func__, &group);
		}
	}

	/* Deregister addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
			   &nht_p);
	pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);

	if (!pim_get_all_mcast_group(&g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	/* Deleting the catch-all entry: keep it, but clear its RP address
	 * and detach any (*,G) upstream that pointed at the deleted RP. */
	if (rp_all == rp_info) {
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			/* Find the upstream (*, G) whose upstream address is
			 * same as the deleted RP
			 */
			pim_addr rpf_addr;

			rpf_addr = rp_info->rp.rpf_addr;
			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
			    pim_addr_is_any(up->sg.src)) {
				struct prefix grp;

				pim_addr_to_prefix(&grp, up->sg.grp);
				trp_info = pim_rp_find_match_group(pim, &grp);
				if (trp_info == rp_all) {
					pim_upstream_rpf_clear(pim, up);
					up->upstream_addr = PIMADDR_ANY;
				}
			}
		}
		rp_all->rp.rpf_addr = PIMADDR_ANY;
		rp_all->i_am_rp = 0;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		if (rn) {
			if (rn->info != rp_info)
				flog_err(
					EC_LIB_DEVELOPMENT,
					"Expected rn->info to be equal to rp_info");

			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
					__func__, rn, rp_info, &rp_info->group,
					route_node_get_lock_count(rn));

			/* Unlock twice: once for the route_node_get() just
			 * above, once to drop the lock held since the entry
			 * was installed. */
			rn->info = NULL;
			route_unlock_node(rn);
			route_unlock_node(rn);
		}
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Re-resolve every (*,G) upstream that used the deleted RP */
	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* Find the upstream (*, G) whose upstream address is same as
		 * the deleted RP
		 */
		pim_addr rpf_addr;

		rpf_addr = rp_info->rp.rpf_addr;
		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
		    pim_addr_is_any(up->sg.src)) {
			struct prefix grp;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			/* RP not found for the group grp */
			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
				pim_upstream_rpf_clear(pim, up);
				pim_rp_set_upstream_addr(
					pim, &up->upstream_addr, up->sg.src,
					up->sg.grp);
			}

			/* RP found for the group grp */
			else {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}
844 :
/*
 * Replace the RP address for an existing group entry in place. If no
 * entry exists for the group yet, this degenerates to pim_rp_new().
 * NHT tracking is moved from the old address to the new one and all
 * (*,G) upstream state covered by the entry is re-resolved.
 */
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
		  struct prefix group, enum rp_source rp_src_flag)
{
	pim_addr nht_p;
	struct route_node *rn;
	int result = 0;
	struct rp_info *rp_info = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;
	pim_addr old_rp_addr;

	rn = route_node_lookup(pim->rp_table, &group);
	if (!rn) {
		/* No entry for this exact group: create one */
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	rp_info = rn->info;

	if (!rp_info) {
		route_unlock_node(rn);
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	old_rp_addr = rp_info->rp.rpf_addr;
	/* Same address: at most the source flag needs updating.
	 * NOTE(review): when both address and source are unchanged we fall
	 * through and redo the full re-registration below - presumably
	 * intentional; confirm against callers. */
	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
		if (rp_info->rp_src != rp_src_flag) {
			rp_info->rp_src = rp_src_flag;
			route_unlock_node(rn);
			return PIM_SUCCESS;
		}
	}

	/* Deregister old RP addr with Zebra NHT */

	if (!pim_addr_is_any(old_rp_addr)) {
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
				   __func__, &nht_p);
		pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
	}

	pim_rp_nexthop_del(rp_info);
	/* Remove and re-insert so the list stays sorted by RP address */
	listnode_delete(pim->rp_list, rp_info);
	/* Update the new RP address*/

	rp_info->rp.rpf_addr = new_rp_addr;
	rp_info->rp_src = rp_src_flag;
	rp_info->i_am_rp = 0;

	listnode_add_sort(pim->rp_list, rp_info);

	/* Re-resolve every (*,G) upstream covered by this entry */
	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	/* Register new RP addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);

	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1)) {
		/* Drop the route_node_lookup() reference before returning */
		route_unlock_node(rn);
		return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);

	route_unlock_node(rn);

	pim_rp_refresh_group_to_rp_mapping(pim);

	return result;
}
938 :
939 38 : void pim_rp_setup(struct pim_instance *pim)
940 : {
941 38 : struct listnode *node;
942 38 : struct rp_info *rp_info;
943 38 : pim_addr nht_p;
944 :
945 114 : for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
946 38 : if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
947 38 : continue;
948 :
949 0 : nht_p = rp_info->rp.rpf_addr;
950 :
951 0 : pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
952 0 : if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
953 : nht_p, &rp_info->group, 1)) {
954 0 : if (PIM_DEBUG_PIM_NHT_RP)
955 0 : zlog_debug(
956 : "Unable to lookup nexthop for rp specified");
957 0 : pim_rp_nexthop_del(rp_info);
958 : }
959 : }
960 38 : }
961 :
962 : /*
963 : * Checks to see if we should elect ourself the actual RP when new if
964 : * addresses are added against an interface.
965 : */
966 38 : void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
967 : {
968 38 : struct listnode *node;
969 38 : struct rp_info *rp_info;
970 38 : bool i_am_rp_changed = false;
971 38 : struct pim_instance *pim = pim_ifp->pim;
972 :
973 38 : if (pim->rp_list == NULL)
974 : return;
975 :
976 76 : for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
977 38 : if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
978 38 : continue;
979 :
980 : /* if i_am_rp is already set nothing to be done (adding new
981 : * addresses
982 : * is not going to make a difference). */
983 0 : if (rp_info->i_am_rp) {
984 0 : continue;
985 : }
986 :
987 0 : if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
988 0 : i_am_rp_changed = true;
989 0 : rp_info->i_am_rp = 1;
990 0 : if (PIM_DEBUG_PIM_NHT_RP)
991 38 : zlog_debug("%s: %pPA: i am rp", __func__,
992 : &rp_info->rp.rpf_addr);
993 : }
994 : }
995 :
996 38 : if (i_am_rp_changed) {
997 0 : pim_msdp_i_am_rp_changed(pim);
998 0 : pim_upstream_reeval_use_rpt(pim);
999 : }
1000 : }
1001 :
/* Un-optimized re-evaluation of "i_am_rp". This is used when interface
 * addresses are removed. Removing addresses is an uncommon event in an
 * active network, so no attempt has been made to optimize it. */
1005 4 : void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
1006 : {
1007 4 : struct listnode *node;
1008 4 : struct rp_info *rp_info;
1009 4 : bool i_am_rp_changed = false;
1010 4 : int old_i_am_rp;
1011 :
1012 4 : if (pim->rp_list == NULL)
1013 : return;
1014 :
1015 8 : for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1016 4 : if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1017 4 : continue;
1018 :
1019 0 : old_i_am_rp = rp_info->i_am_rp;
1020 0 : pim_rp_check_interfaces(pim, rp_info);
1021 :
1022 0 : if (old_i_am_rp != rp_info->i_am_rp) {
1023 0 : i_am_rp_changed = true;
1024 0 : if (PIM_DEBUG_PIM_NHT_RP) {
1025 0 : if (rp_info->i_am_rp)
1026 0 : zlog_debug("%s: %pPA: i am rp",
1027 : __func__,
1028 : &rp_info->rp.rpf_addr);
1029 : else
1030 4 : zlog_debug(
1031 : "%s: %pPA: i am no longer rp",
1032 : __func__,
1033 : &rp_info->rp.rpf_addr);
1034 : }
1035 : }
1036 : }
1037 :
1038 4 : if (i_am_rp_changed) {
1039 0 : pim_msdp_i_am_rp_changed(pim);
1040 0 : pim_upstream_reeval_use_rpt(pim);
1041 : }
1042 : }
1043 :
1044 : /*
1045 : * I_am_RP(G) is true if the group-to-RP mapping indicates that
1046 : * this router is the RP for the group.
1047 : *
1048 : * Since we only have static RP, all groups are part of this RP
1049 : */
1050 5 : int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
1051 : {
1052 5 : struct prefix g;
1053 5 : struct rp_info *rp_info;
1054 :
1055 5 : memset(&g, 0, sizeof(g));
1056 5 : pim_addr_to_prefix(&g, group);
1057 5 : rp_info = pim_rp_find_match_group(pim, &g);
1058 :
1059 5 : if (rp_info)
1060 5 : return rp_info->i_am_rp;
1061 : return 0;
1062 : }
1063 :
1064 : /*
1065 : * RP(G)
1066 : *
1067 : * Return the RP that the Group belongs too.
1068 : */
1069 2 : struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
1070 : {
1071 2 : struct prefix g;
1072 2 : struct rp_info *rp_info;
1073 :
1074 2 : memset(&g, 0, sizeof(g));
1075 2 : pim_addr_to_prefix(&g, group);
1076 :
1077 2 : rp_info = pim_rp_find_match_group(pim, &g);
1078 :
1079 2 : if (rp_info) {
1080 2 : pim_addr nht_p;
1081 :
1082 : /* Register addr with Zebra NHT */
1083 2 : nht_p = rp_info->rp.rpf_addr;
1084 2 : if (PIM_DEBUG_PIM_NHT_RP)
1085 0 : zlog_debug(
1086 : "%s: NHT Register RP addr %pPA grp %pFX with Zebra",
1087 : __func__, &nht_p, &rp_info->group);
1088 2 : pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
1089 2 : pim_rpf_set_refresh_time(pim);
1090 2 : (void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
1091 : nht_p, &rp_info->group, 1);
1092 2 : return (&rp_info->rp);
1093 : }
1094 :
1095 : // About to Go Down
1096 : return NULL;
1097 : }
1098 :
1099 : /*
1100 : * Set the upstream IP address we want to talk to based upon
1101 : * the rp configured and the source address
1102 : *
 * If we don't have an RP configured and the source address is *
1104 : * then set the upstream addr as INADDR_ANY and return failure.
1105 : *
1106 : */
1107 5 : int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
1108 : pim_addr source, pim_addr group)
1109 : {
1110 5 : struct rp_info *rp_info;
1111 5 : struct prefix g;
1112 :
1113 5 : memset(&g, 0, sizeof(g));
1114 :
1115 5 : pim_addr_to_prefix(&g, group);
1116 :
1117 5 : rp_info = pim_rp_find_match_group(pim, &g);
1118 :
1119 5 : if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
1120 5 : (pim_addr_is_any(source)))) {
1121 0 : if (PIM_DEBUG_PIM_NHT_RP)
1122 0 : zlog_debug("%s: Received a (*,G) with no RP configured",
1123 : __func__);
1124 0 : *up = PIMADDR_ANY;
1125 0 : return 0;
1126 : }
1127 :
1128 5 : if (pim_addr_is_any(source))
1129 0 : *up = rp_info->rp.rpf_addr;
1130 : else
1131 5 : *up = source;
1132 :
1133 : return 1;
1134 : }
1135 :
1136 0 : int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
1137 : const char *spaces)
1138 : {
1139 0 : struct listnode *node;
1140 0 : struct rp_info *rp_info;
1141 0 : int count = 0;
1142 0 : pim_addr rp_addr;
1143 :
1144 0 : for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
1145 0 : if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
1146 0 : continue;
1147 :
1148 0 : if (rp_info->rp_src == RP_SRC_BSR)
1149 0 : continue;
1150 :
1151 0 : rp_addr = rp_info->rp.rpf_addr;
1152 0 : if (rp_info->plist)
1153 0 : vty_out(vty,
1154 : "%s" PIM_AF_NAME
1155 : " pim rp %pPA prefix-list %s\n",
1156 : spaces, &rp_addr, rp_info->plist);
1157 : else
1158 0 : vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
1159 : spaces, &rp_addr, &rp_info->group);
1160 0 : count++;
1161 : }
1162 :
1163 0 : return count;
1164 : }
1165 :
/* Render the RP table for "show ip/ipv6 pim rp-info": either as a text
 * table on the vty (json == NULL) or into the supplied json object.
 * range, if non-NULL, restricts output to RP entries whose group prefix
 * is contained in it.
 */
void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
			     struct vty *vty, json_object *json)
{
	struct rp_info *rp_info;
	struct rp_info *prev_rp_info = NULL;
	struct listnode *node;
	struct ttable *tt = NULL;
	char *table = NULL;
	char source[7];
	char grp[INET6_ADDRSTRLEN];

	json_object *json_rp_rows = NULL;
	json_object *json_row = NULL;

	if (!json) {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(
			tt,
			"RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);
	}

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		/* The catch-all "any" entry is internal; never shown. */
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

#if PIM_IPV == 4
		pim_addr group = rp_info->group.u.prefix4;
#else
		pim_addr group = rp_info->group.u.prefix6;
#endif
		const char *group_type =
			pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";

		if (range && !prefix_match(&rp_info->group, range))
			continue;

		/* source[] is sized 7: long enough for "Static". */
		if (rp_info->rp_src == RP_SRC_STATIC)
			strlcpy(source, "Static", sizeof(source));
		else if (rp_info->rp_src == RP_SRC_BSR)
			strlcpy(source, "BSR", sizeof(source));
		else
			strlcpy(source, "None", sizeof(source));
		if (json) {
			/*
			 * If we have moved on to a new RP then add the
			 * entry for the previous RP
			 */
			if (prev_rp_info &&
			    (pim_addr_cmp(prev_rp_info->rp.rpf_addr,
					  rp_info->rp.rpf_addr))) {
				json_object_object_addf(
					json, json_rp_rows, "%pPA",
					&prev_rp_info->rp.rpf_addr);
				json_rp_rows = NULL;
			}

			/* json_rp_rows accumulates all group rows that
			 * share the current RP address. */
			if (!json_rp_rows)
				json_rp_rows = json_object_new_array();

			json_row = json_object_new_object();
			json_object_string_addf(json_row, "rpAddress", "%pPA",
						&rp_info->rp.rpf_addr);
			if (rp_info->rp.source_nexthop.interface)
				json_object_string_add(
					json_row, "outboundInterface",
					rp_info->rp.source_nexthop
						.interface->name);
			else
				json_object_string_add(json_row,
						       "outboundInterface",
						       "Unknown");
			if (rp_info->i_am_rp)
				json_object_boolean_true_add(json_row, "iAmRP");
			else
				json_object_boolean_false_add(json_row,
							      "iAmRP");

			/* Entry was configured either via a prefix-list
			 * or via an explicit group prefix, not both. */
			if (rp_info->plist)
				json_object_string_add(json_row, "prefixList",
						       rp_info->plist);
			else
				json_object_string_addf(json_row, "group",
							"%pFX",
							&rp_info->group);
			json_object_string_add(json_row, "source", source);
			json_object_string_add(json_row, "groupType",
					       group_type);

			json_object_array_add(json_rp_rows, json_row);
		} else {
			prefix2str(&rp_info->group, grp, sizeof(grp));
			ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s",
				       &rp_info->rp.rpf_addr,
				       rp_info->plist
					       ? rp_info->plist
					       : grp,
				       rp_info->rp.source_nexthop.interface
					       ? rp_info->rp.source_nexthop
							 .interface->name
					       : "Unknown",
				       rp_info->i_am_rp
					       ? "yes"
					       : "no",
				       source, group_type);
		}
		prev_rp_info = rp_info;
	}

	/* Dump the generated table. */
	if (!json) {
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	} else {
		/* Flush the rows buffered for the final RP address. */
		if (prev_rp_info && json_rp_rows)
			json_object_object_addf(json, json_rp_rows, "%pPA",
						&prev_rp_info->rp.rpf_addr);
	}
}
1290 :
/* When neighbor nbr comes up, walk every real RP's cached nexthops and,
 * for any nexthop that has no gateway address yet and egresses via the
 * neighbor's interface, fill in the neighbor's source address as the
 * gateway.  This resolves RPs whose zebra nexthop was interface-only.
 */
void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
{
	struct listnode *node = NULL;
	struct rp_info *rp_info = NULL;
	struct nexthop *nh_node = NULL;
	pim_addr nht_p;
	struct pim_nexthop_cache pnc;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		/* The catch-all "any" entry has no resolvable RP. */
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		nht_p = rp_info->rp.rpf_addr;
		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
		/* Also registers the RP with NHT if not yet tracked;
		 * skip entries that have no cache yet. */
		if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
			continue;

		for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
			/* Only patch nexthops whose gateway is unset. */
#if PIM_IPV == 4
			if (!pim_addr_is_any(nh_node->gate.ipv4))
				continue;
#else
			if (!pim_addr_is_any(nh_node->gate.ipv6))
				continue;
#endif

			struct interface *ifp1 = if_lookup_by_index(
				nh_node->ifindex, pim->vrf->vrf_id);

			/* The nexthop must egress via the new neighbor's
			 * interface to be resolvable through it. */
			if (nbr->interface != ifp1)
				continue;

#if PIM_IPV == 4
			nh_node->gate.ipv4 = nbr->source_addr;
#else
			nh_node->gate.ipv6 = nbr->source_addr;
#endif
			/* NOTE(review): the "%pPAs" below prints the
			 * address followed by a literal 's' — the 's'
			 * looks like a leftover typo; confirm upstream. */
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: addr %pPA new nexthop addr %pPAs interface %s",
					__func__, &nht_p, &nbr->source_addr,
					ifp1->name);
		}
	}
}
|