"Fossies" - the Fresh Open Source Software Archive 
As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style:
standard) with prefixed line numbers and
code folding option.
Alternatively, you can view or download the uninterpreted source code file here.
1 /* $OpenBSD: rde.c,v 1.81 2009/06/06 07:31:26 eric Exp $ */
2
3 /*
4 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <netinet/in.h>
25 #include <arpa/inet.h>
26 #include <err.h>
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <signal.h>
30 #include <string.h>
31 #include <pwd.h>
32 #include <unistd.h>
33 #include <event.h>
34
35 #include "ospf.h"
36 #include "ospfd.h"
37 #include "ospfe.h"
38 #include "log.h"
39 #include "rde.h"
40
/* signal handling, shutdown and imsg dispatch */
void rde_sig_handler(int sig, short, void *);
void rde_shutdown(void);
void rde_dispatch_imsg(int, short, void *);
void rde_dispatch_parent(int, short, void *);

/* "show summary" replies sent back via the ospf engine */
void rde_send_summary(pid_t);
void rde_send_summary_area(struct area *, pid_t);
/* RDE-side neighbor table (hash keyed by imsg peerid) */
void rde_nbr_init(u_int32_t);
void rde_nbr_free(void);
struct rde_nbr *rde_nbr_find(u_int32_t);
struct rde_nbr *rde_nbr_new(u_int32_t, struct rde_nbr *);
void rde_nbr_del(struct rde_nbr *);

/* per-neighbor list of outstanding LSA requests */
void rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void rde_req_list_free(struct rde_nbr *);

/* origination/withdrawal of AS-external LSAs for kernel routes */
struct lsa *rde_asext_get(struct rroute *);
struct lsa *rde_asext_put(struct rroute *);

struct lsa *orig_asext_lsa(struct rroute *, u_int16_t);
struct lsa *orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);

struct ospfd_conf *rdeconf = NULL, *nconf = NULL; /* running / pending config */
struct imsgev *iev_ospfe;   /* imsg channel to the ospf engine process */
struct imsgev *iev_main;    /* imsg channel to the parent process */
struct rde_nbr *nbrself;    /* pseudo-neighbor for self-originated LSAs */
struct lsa_tree asext_tree; /* database of type-5 (AS-external) LSAs */
70
71 /* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * libevent decouples signal delivery from signal context,
	 * so the usual async-signal-safety rules do not apply here.
	 */
	if (sig == SIGINT || sig == SIGTERM) {
		rde_shutdown();
		/* NOTREACHED */
	}
	fatalx("unexpected signal");
}
88
89 /* route decision engine */
/*
 * Route decision engine: fork the RDE child process, drop privileges,
 * wire up the imsg pipes and run the libevent loop.  The parent gets
 * the child's pid back; the child never returns (it exits through
 * rde_shutdown()).
 */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct area		*area;
	struct iface		*iface;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		/* parent: hand the child's pid back to the caller */
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPFD_USER)) == NULL)
		fatal("getpwnam");

	/* chroot into the unprivileged user's home and drop privileges */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends belonging to the other processes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* remove unneeded stuff from config */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			md_list_clr(&iface->auth_md_list);

	/* redistribute entries are not used in this process, free them */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
191
/*
 * Tear down all RDE state (SPF timer, candidate list, routing table,
 * areas, neighbors, imsg buffers) and exit the process.  Never returns.
 */
void
rde_shutdown(void)
{
	struct area	*a;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	/* flush any not-yet-written imsgs before freeing the buffers */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	free(iev_ospfe);
	msgbuf_clear(&iev_main->ibuf.w);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}
216
/*
 * Convenience wrapper: send an imsg of the given type to the ospf
 * engine process.  Returns whatever imsg_compose_event() returns.
 */
int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1, data,
	    datalen));
}
223
/* ARGSUSED */
/*
 * Event handler for the pipe from the ospf engine.  Drains the imsg
 * queue and processes neighbor state changes, database description,
 * LS request/update/maxage messages and the various "show" control
 * requests.  This implements the RDE side of the flooding procedure
 * (RFC 2328, section 13).
 */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, error, shut = 0;
	u_int16_t		 l;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
	}

	/* monotonic clock: used for MIN_LS_ARRIVAL rate limiting below */
	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* update area adjacency count on FULL transitions */
			if (state != nbr->state && (nbr->state & NBR_STA_FULL ||
			    state & NBR_STA_FULL))
				area_track(nbr->area, state);

			nbr->state = state;
			/* once FULL, pending LSA requests are obsolete */
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* send a full DB snapshot, terminated by DB_END */
			lsa_snap(nbr->area, imsg.hdr.peerid);

			imsg_compose_event(iev_ospfe, IMSG_DB_END,
			    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* walk the list of LSA headers in the DD packet */
			buf = imsg.data;
			error = 0;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				/* type-5 LSAs are not allowed in stub areas */
				if (lsa_hdr.type == LSA_TYPE_EXTERNAL &&
				    nbr->area->stub) {
					error = 1;
					break;
				}
				v = lsa_find(nbr->area, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0 && !error)
				log_warnx("rde_dispatch_imsg: peerid %lu, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			if (!error)
				imsg_compose_event(iev_ospfe, IMSG_DD_END,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			else
				imsg_compose_event(iev_ospfe, IMSG_DD_BADLSA,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* answer each requested LSA with an LS update */
			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->area,
				    ntohl(req_hdr.type), req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					/* requested LSA unknown: BadLSReq */
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ,
					    imsg.hdr.peerid, 0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %lu, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* take a private copy of the received LSA */
			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->area, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					/* rate-limit re-flooding */
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				/* ownership of lsa passes to lsa_add unless
				 * it is a self-originated one */
				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD,
					    v->peerid, 0, -1, v->lsa,
					    ntohs(v->lsa->hdr.len));
				/* lsa not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/* lsa no longer needed */
				free(lsa);

				/*
				 * point 6 of "The Flooding Procedure"
				 * We are violating the RFC here because
				 * it does not make sense to reset a session
				 * because an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table we should reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ,
					    imsg.hdr.peerid, 0, -1, NULL, 0);
					break;
				}

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			/* must not flush LSAs while a neighbor is loading */
			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->area, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
			/* optional payload: an area id to restrict the dump */
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list,
				    entry) {
					imsg_compose_event(iev_ospfe,
					    IMSG_CTL_AREA,
					    0, imsg.hdr.pid, -1, area,
					    sizeof(*area));
					lsa_dump(&area->lsa_tree,
					    imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					imsg_compose_event(iev_ospfe,
					    IMSG_CTL_AREA,
					    0, imsg.hdr.pid, -1, area,
					    sizeof(*area));
					lsa_dump(&area->lsa_tree,
					    imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid,
			    -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid,
			    -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid,
			    -1, NULL, 0);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
579
/* ARGSUSED */
/*
 * Event handler for the pipe from the parent process.  Handles kernel
 * route redistribution (NETWORK_ADD/DEL) and staged reconfiguration:
 * RECONF_CONF starts a new config, RECONF_AREA/REDIST/IFACE attach
 * pieces to it, RECONF_END merges it into the running config.
 * Note: narea is static so it persists across imsg batches while a
 * reconfiguration is in progress.
 */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface;
	struct imsg		 imsg;
	struct rroute		 rr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct lsa		*lsa;
	struct vertex		*v;
	struct redistribute	*nred;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			/* announce the kernel route as an AS-external LSA */
			if ((lsa = rde_asext_get(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			/* withdraw by re-announcing with MAX_AGE */
			if ((lsa = rde_asext_put(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				/*
				 * if v == NULL no LSA is in the table and
				 * nothing has to be done.
				 */
				if (v)
					lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			/* list heads copied above are stale, reset them */
			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);
			SIMPLEQ_INIT(&narea->redist_list);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_REDIST:
			if ((nred= malloc(sizeof(struct redistribute))) == NULL)
				fatal(NULL);
			memcpy(nred, imsg.data, sizeof(struct redistribute));

			/* attach to the most recently received area */
			SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry);
			break;
		case IMSG_RECONF_IFACE:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			TAILQ_INIT(&niface->auth_md_list);

			niface->area = narea;
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);

			break;
		case IMSG_RECONF_END:
			/* merge_config takes ownership of nconf */
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
711
712 u_int32_t
713 rde_router_id(void)
714 {
715 return (rdeconf->rtr_id.s_addr);
716 }
717
718 struct area *
719 rde_backbone_area(void)
720 {
721 struct in_addr id;
722
723 id.s_addr = INADDR_ANY;
724
725 return (area_find(rdeconf, id));
726 }
727
/*
 * Send one IMSG_KROUTE_CHANGE message to the parent carrying every
 * valid nexthop of the given route.  It is a fatal error to call this
 * with a route that has no valid nexthop.
 */
void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct buf		*wbuf;

	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix.s_addr = r->prefix.s_addr;
		kr.nexthop.s_addr = rn->nexthop.s_addr;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		/* append this nexthop to the message payload */
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	if (krcount == 0)
		fatalx("rde_send_change_kroute: no valid nexthop found");
	imsg_close(&iev_main->ibuf, wbuf);
	imsg_event_add(iev_main);
}
758
759 void
760 rde_send_delete_kroute(struct rt_node *r)
761 {
762 struct kroute kr;
763
764 bzero(&kr, sizeof(kr));
765 kr.prefix.s_addr = r->prefix.s_addr;
766 kr.prefixlen = r->prefixlen;
767
768 imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1, &kr, sizeof(kr));
769 }
770
/*
 * Compose the global "show summary" reply (router id, SPF timers,
 * area and external-LSA counts, uptime) and send it to the control
 * client identified by pid.
 */
void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	/* count type-5 LSAs in the AS-external database */
	RB_FOREACH(v, lsa_tree, &asext_tree)
		sumctl.num_ext_lsa++;

	gettimeofday(&now, NULL);
	/* guard against the wall clock having been stepped backwards */
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	sumctl.rfc1583compat = rdeconf->rfc1583compat;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}
802
/*
 * Compose the per-area "show summary" reply (interface, adjacency,
 * LSA and SPF-run counts) and send it to the control client.
 */
void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	/* count only fully adjacent real neighbors, not nbrself */
	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree)
		sumareactl.num_lsa++;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}
830
LIST_HEAD(rde_nbr_head, rde_nbr);

/* hash table of RDE neighbors, keyed by imsg peerid */
struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;	/* size - 1; size is a power of two */
} rdenbrtable;

/* bucket head for a given peerid */
#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
840
/*
 * Allocate the neighbor hash table (rounded up to the next power of
 * two so the mask works) and insert the "self" pseudo-neighbor used
 * for self-originated LSAs.
 */
void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	/* round hashsize up to a power of two */
	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}
868
869 void
870 rde_nbr_free(void)
871 {
872 free(nbrself);
873 free(rdenbrtable.hashtbl);
874 }
875
876 struct rde_nbr *
877 rde_nbr_find(u_int32_t peerid)
878 {
879 struct rde_nbr_head *head;
880 struct rde_nbr *nbr;
881
882 head = RDE_NBR_HASH(peerid);
883
884 LIST_FOREACH(nbr, head, hash) {
885 if (nbr->peerid == peerid)
886 return (nbr);
887 }
888
889 return (NULL);
890 }
891
/*
 * Create a new RDE neighbor from the template received via imsg and
 * link it into the hash table and its area's neighbor list.  Returns
 * NULL if a neighbor with that peerid already exists; it is fatal if
 * the template references an unknown area.
 */
struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}
919
920 void
921 rde_nbr_del(struct rde_nbr *nbr)
922 {
923 if (nbr == NULL)
924 return;
925
926 rde_req_list_free(nbr);
927
928 LIST_REMOVE(nbr, entry);
929 LIST_REMOVE(nbr, hash);
930
931 free(nbr);
932 }
933
934 int
935 rde_nbr_loading(struct area *area)
936 {
937 struct rde_nbr *nbr;
938 int checkall = 0;
939
940 if (area == NULL) {
941 area = LIST_FIRST(&rdeconf->area_list);
942 checkall = 1;
943 }
944
945 while (area != NULL) {
946 LIST_FOREACH(nbr, &area->nbr_list, entry) {
947 if (nbr->self)
948 continue;
949 if (nbr->state & NBR_STA_XCHNG ||
950 nbr->state & NBR_STA_LOAD)
951 return (1);
952 }
953 if (!checkall)
954 break;
955 area = LIST_NEXT(area, entry);
956 }
957
958 return (0);
959 }
960
961 struct rde_nbr *
962 rde_nbr_self(struct area *area)
963 {
964 struct rde_nbr *nbr;
965
966 LIST_FOREACH(nbr, &area->nbr_list, entry)
967 if (nbr->self)
968 return (nbr);
969
970 /* this may not happen */
971 fatalx("rde_nbr_self: area without self");
972 return (NULL);
973 }
974
975 /*
976 * LSA req list
977 */
978 void
979 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
980 {
981 struct rde_req_entry *le;
982
983 if ((le = calloc(1, sizeof(*le))) == NULL)
984 fatal("rde_req_list_add");
985
986 TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
987 le->type = lsa->type;
988 le->ls_id = lsa->ls_id;
989 le->adv_rtr = lsa->adv_rtr;
990 }
991
992 int
993 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
994 {
995 struct rde_req_entry *le;
996
997 TAILQ_FOREACH(le, &nbr->req_list, entry) {
998 if ((lsa_hdr->type == le->type) &&
999 (lsa_hdr->ls_id == le->ls_id) &&
1000 (lsa_hdr->adv_rtr == le->adv_rtr))
1001 return (1);
1002 }
1003 return (0);
1004 }
1005
1006 void
1007 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1008 {
1009 struct rde_req_entry *le;
1010
1011 TAILQ_FOREACH(le, &nbr->req_list, entry) {
1012 if ((lsa_hdr->type == le->type) &&
1013 (lsa_hdr->ls_id == le->ls_id) &&
1014 (lsa_hdr->adv_rtr == le->adv_rtr)) {
1015 TAILQ_REMOVE(&nbr->req_list, le, entry);
1016 free(le);
1017 return;
1018 }
1019 }
1020 }
1021
1022 void
1023 rde_req_list_free(struct rde_nbr *nbr)
1024 {
1025 struct rde_req_entry *le;
1026
1027 while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1028 TAILQ_REMOVE(&nbr->req_list, le, entry);
1029 free(le);
1030 }
1031 }
1032
1033 /*
1034 * as-external LSA handling
1035 */
/*
 * Build an AS-external LSA for a redistributed kernel route, unless
 * the prefix is already covered by a directly connected interface (in
 * which case it is announced as a (stub) network LSA and NULL is
 * returned).
 */
struct lsa *
rde_asext_get(struct rroute *rr)
{
	struct area	*area;
	struct iface	*iface;

	/* scan all interfaces for an exact match of prefix and mask */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    rr->kr.prefix.s_addr && iface->mask.s_addr ==
			    prefixlen2mask(rr->kr.prefixlen)) {
				/* already announced as (stub) net LSA */
				log_debug("rde_asext_get: %s/%d is net LSA",
				    inet_ntoa(rr->kr.prefix),
				    rr->kr.prefixlen);
				return (NULL);
			}
		}

	/* update of seqnum is done by lsa_merge */
	return (orig_asext_lsa(rr, DEFAULT_AGE));
}
1057
1058 struct lsa *
1059 rde_asext_put(struct rroute *rr)
1060 {
1061 /*
1062 * just try to remove the LSA. If the prefix is announced as
1063 * stub net LSA lsa_find() will fail later and nothing will happen.
1064 */
1065
1066 /* remove by reflooding with MAX_AGE */
1067 return (orig_asext_lsa(rr, MAX_AGE));
1068 }
1069
1070 /*
1071 * summary LSA stuff
1072 */
/*
 * Decide whether a summary LSA for route rte must be originated into
 * area, and if so (re)originate it via lsa_merge.  All the early
 * returns implement the RFC 2328 section 12.4.3 suppression rules.
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct rt_nexthop	*rn;
	struct rt_node		*nr;
	struct vertex		*v = NULL;
	struct lsa		*lsa;
	u_int8_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* nexthop check, nexthop part of area -> no summary */
	TAILQ_FOREACH(rn, &rte->nexthop, entry) {
		nr = rt_lookup(DT_NET, rn->nexthop.s_addr);
		if (nr && nr->area.s_addr == area->id.s_addr)
			continue;
		/* found a nexthop outside the area, summary is needed */
		break;
	}
	if (rn == NULL)	/* all nexthops belong to this area */
		return;

	/* unreachable routes are never summarized */
	if (rte->cost >= LS_INFINITY)
		return;
	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_SUM_NETWORK;
	} else if (rte->d_type == DT_RTR) {
		if (area->stub)
			/* do not redistribute type 4 LSA into stub areas */
			return;
		type = LSA_TYPE_SUM_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

	/* update lsa but only if it was changed */
	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}
1132
1133
1134 /*
1135 * functions for self-originated LSA
1136 */
/*
 * Allocate and fill a self-originated AS-external (type-5) LSA for a
 * redistributed kernel route.  The caller takes ownership of the
 * returned LSA; passing MAX_AGE as age originates a withdrawal.
 */
struct lsa *
orig_asext_lsa(struct rroute *rr, u_int16_t age)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.opts = area_ospf_options(NULL);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rr->kr.prefix.s_addr;
	lsa->data.asext.mask = prefixlen2mask(rr->kr.prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * on all other cases we announce the true nexthop.
	 * XXX this is wrong as the true nexthop may be outside
	 * of the ospf cloud and so unreachable. For now we force
	 * all traffic to be directed to us.
	 */
	lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(rr->metric);
	lsa->data.asext.ext_tag = htonl(rr->kr.ext_tag);

	/* checksum is computed over the LSA with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1185
/*
 * Allocate and fill a self-originated summary LSA (type 3 for
 * networks, type 4 for ASBRs) for route rte into the given area.  An
 * invalid route is originated with MAX_AGE to flush it.  The caller
 * takes ownership of the returned LSA.
 */
struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.opts = area_ospf_options(area);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;		/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	/* checksum is computed over the LSA with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}