"Fossies" - the Fresh Open Source Software Archive 
Member "vpnc-0.5.3/tunip.c" (19 Nov 2008, 29273 Bytes) of package /linux/privat/old/vpnc-0.5.3.tar.gz:
As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style:
standard) with prefixed line numbers and
code folding option.
Alternatively, you can view or download the uninterpreted source code file here.
For more information about "tunip.c" see the
Fossies "Dox" file reference documentation.
1 /* IPSec ESP and AH support.
2 Copyright (c) 1999 Pierre Beyssac
3 Copyright (C) 2002 Geoffrey Keating
4 Copyright (C) 2003-2007 Maurice Massar
5 Copyright (C) 2004 Tomas Mraz
6 Copyright (C) 2005 Michael Tilstra
7 Copyright (C) 2006 Daniel Roethlisberger
8 Copyright (C) 2007 Paolo Zarpellon (tap+Cygwin support)
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23
24 $Id: tunip.c 371 2008-11-19 20:55:28Z Joerg Mayer $
25 */
26
27 /* borrowed from pipsecd (-; */
28
29 /*-
30 * Copyright (c) 1999 Pierre Beyssac
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 *
54 */
55
56 #include <sys/types.h>
57 #include <sys/socket.h>
58 #include <errno.h>
59 #include <assert.h>
60 #include <unistd.h>
61 #include <fcntl.h>
62 #include <stdio.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
66 #ifndef __SKYOS__
67 #include <netinet/ip_icmp.h>
68 #endif
69 #include <arpa/inet.h>
70 #include <stdlib.h>
71 #include <string.h>
72 #include <strings.h>
73 #include <syslog.h>
74 #include <time.h>
75 #include <sys/select.h>
76 #include <signal.h>
77
78 #ifdef __CYGWIN__
79 #include <pthread.h>
80 #endif
81
82 #if !defined(__sun__) && !defined(__SKYOS__)
83 #include <err.h>
84 #endif
85
86 #include <gcrypt.h>
87 #include "sysdep.h"
88 #include "config.h"
89 #include "vpnc.h"
90
91 #include "tunip.h"
92
#ifndef MAX
/* Maximum of two values; classic macro — beware double evaluation of arguments. */
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

#ifndef FD_COPY
/* Copy an entire fd_set (BSD provides FD_COPY natively; emulate it elsewhere). */
#define FD_COPY(f, t) ((void)memcpy((t), (f), sizeof(*(f))))
#endif
100
/* A real ESP header (RFC 2406) — the fixed 8-byte part that precedes
 * the IV, payload, padding, trailer and optional authenticator. */
typedef struct esp_encap_header {
	uint32_t spi;	/* security parameters index */
	uint32_t seq_id;	/* sequence id (unimplemented) */
	/* variable-length payload data + padding */
	/* unsigned char next_header */
	/* optional auth data */
} __attribute__((packed)) esp_encap_header_t;
109
/* Dispatch table for an encapsulation transport (raw IP proto 50 or UDP). */
struct encap_method {
	int fixed_header_size;	/* bytes of fixed ESP header (SPI + seq id) */

	/* read one packet from the network into buf; returns length or -1 */
	int (*recv) (struct sa_block *s, unsigned char *buf, unsigned int bufsize);
	/* encapsulate buf (ESP-encrypt) and transmit it to the peer */
	void (*send_peer) (struct sa_block *s, unsigned char *buf, unsigned int bufsize);
	/* authenticate/decrypt the packet previously stored in s->ipsec.rx */
	int (*recv_peer) (struct sa_block *s);
};
117
/* Yuck! Global variables... */

#define MAX_HEADER 72	/* worst-case encapsulation overhead prepended to a packet */
#define MAX_PACKET 4096	/* largest tunnelled payload handled */
/* set by the signal handler (or DPD, negative values) to request shutdown */
int volatile do_kill;
/* shared packet buffers: tun -> net direction (rx) and net -> tun (tx) */
static uint8_t global_buffer_rx[MAX_HEADER + MAX_PACKET + ETH_HLEN];
static uint8_t global_buffer_tx[MAX_HEADER + MAX_PACKET + ETH_HLEN];
125
126 /*
127 * in_cksum --
128 * Checksum routine for Internet Protocol family headers (C Version)
129 */
130 static u_short in_cksum(u_short *addr, int len)
131 {
132 register int nleft = len;
133 register u_short *w = addr;
134 register int sum = 0;
135 u_short answer = 0;
136
137 /*
138 * Our algorithm is simple, using a 32 bit accumulator (sum), we add
139 * sequential 16 bit words to it, and at the end, fold back all the
140 * carry bits from the top 16 bits into the lower 16 bits.
141 */
142 while (nleft > 1) {
143 sum += *w++;
144 nleft -= 2;
145 }
146
147 /* mop up an odd byte, if necessary */
148 if (nleft == 1) {
149 *(u_char *) (&answer) = *(u_char *) w;
150 sum += answer;
151 }
152
153 /* add back carry outs from top 16 bits to low 16 bits */
154 sum = (sum >> 16) + (sum & 0xffff); /* add hi 16 to low 16 */
155 sum += (sum >> 16); /* add carry */
156 answer = ~sum; /* truncate to 16 bits */
157 return (answer);
158 }
159
160 /*
161 * Decapsulate from a raw IP packet
162 */
163 static int encap_rawip_recv(struct sa_block *s, unsigned char *buf, unsigned int bufsize)
164 {
165 ssize_t r;
166 struct ip *p = (struct ip *)buf;
167 struct sockaddr_in from;
168 socklen_t fromlen = sizeof(from);
169
170 r = recvfrom(s->esp_fd, buf, bufsize, 0, (struct sockaddr *)&from, &fromlen);
171 if (r == -1) {
172 syslog(LOG_ERR, "recvfrom: %m");
173 return -1;
174 }
175 if (from.sin_addr.s_addr != s->dst.s_addr) {
176 syslog(LOG_ALERT, "packet from unknown host %s", inet_ntoa(from.sin_addr));
177 return -1;
178 }
179 if (r < (p->ip_hl << 2) + s->ipsec.em->fixed_header_size) {
180 syslog(LOG_ALERT, "packet too short. got %d, expected %d", r, (p->ip_hl << 2) + s->ipsec.em->fixed_header_size);
181 return -1;
182 }
183
184 #ifdef NEED_IPLEN_FIX
185 p->ip_len = r;
186 #else
187 p->ip_len = ntohs(r);
188 #endif
189
190 s->ipsec.rx.buf = buf;
191 s->ipsec.rx.buflen = r;
192 s->ipsec.rx.bufpayload = (p->ip_hl << 2);
193 s->ipsec.rx.bufsize = bufsize;
194 return r;
195 }
196
197 /*
198 * Decapsulate from an UDP packet
199 */
200 static int encap_udp_recv(struct sa_block *s, unsigned char *buf, unsigned int bufsize)
201 {
202 ssize_t r;
203
204 r = recv(s->esp_fd, buf, bufsize, 0);
205 if (r == -1) {
206 syslog(LOG_ERR, "recvfrom: %m");
207 return -1;
208 }
209 if (s->ipsec.natt_active_mode == NATT_ACTIVE_DRAFT_OLD && r > 8) {
210 r -= 8;
211 memmove(buf, buf + 8, r);
212 }
213 if( r == 1 && *buf == 0xff )
214 {
215 DEBUGTOP(1, printf("UDP NAT keepalive packet received\n"));
216 return -1;
217 }
218 if (r < s->ipsec.em->fixed_header_size) {
219 syslog(LOG_ALERT, "packet too short from %s. got %d, expected %d",
220 inet_ntoa(s->dst), r, s->ipsec.em->fixed_header_size);
221 return -1;
222 }
223
224 s->ipsec.rx.buf = buf;
225 s->ipsec.rx.buflen = r;
226 s->ipsec.rx.bufpayload = 0;
227 s->ipsec.rx.bufsize = bufsize;
228 return r;
229 }
230
231 /*
232 * Decapsulate packet
233 */
234 static int encap_any_decap(struct sa_block *s)
235 {
236 s->ipsec.rx.buflen -= s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size;
237 s->ipsec.rx.buf += s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size;
238 if (s->ipsec.rx.buflen == 0)
239 return 0;
240 return 1;
241 }
242
243 /*
244 * Send decapsulated packet to tunnel device
245 */
246 static int tun_send_ip(struct sa_block *s)
247 {
248 int sent, len;
249 uint8_t *start;
250
251 start = s->ipsec.rx.buf;
252 len = s->ipsec.rx.buflen;
253
254 if (opt_if_mode == IF_MODE_TAP) {
255 #ifndef __sun__
256 /*
257 * Add ethernet header before s->ipsec.rx.buf where
258 * at least ETH_HLEN bytes should be available.
259 */
260 struct ether_header *eth_hdr = (struct ether_header *) (s->ipsec.rx.buf - ETH_HLEN);
261
262 memcpy(eth_hdr->ether_dhost, s->tun_hwaddr, ETH_ALEN);
263 memcpy(eth_hdr->ether_shost, s->tun_hwaddr, ETH_ALEN);
264
265 /* Use a different MAC as source */
266 eth_hdr->ether_shost[0] ^= 0x80; /* toggle some visible bit */
267 eth_hdr->ether_type = htons(ETHERTYPE_IP);
268
269 start = (uint8_t *) eth_hdr;
270 len += ETH_HLEN;
271 #endif
272 }
273
274 sent = tun_write(s->tun_fd, start, len);
275 if (sent != len)
276 syslog(LOG_ERR, "truncated in: %d -> %d\n", len, sent);
277 hex_dump("Tx pkt", start, len, NULL);
278 return 1;
279 }
280
281 /*
282 * Compute HMAC for an arbitrary stream of bytes
283 */
284 static int hmac_compute(int md_algo,
285 const unsigned char *data, unsigned int data_size,
286 unsigned char *digest, unsigned char do_store,
287 const unsigned char *secret, unsigned short secret_size)
288 {
289 gcry_md_hd_t md_ctx;
290 int ret;
291 unsigned char *hmac_digest;
292 unsigned int hmac_len;
293
294 /* See RFC 2104 */
295 gcry_md_open(&md_ctx, md_algo, GCRY_MD_FLAG_HMAC);
296 assert(md_ctx != NULL);
297 ret = gcry_md_setkey(md_ctx, secret, secret_size);
298 assert(ret == 0);
299 gcry_md_write(md_ctx, data, data_size);
300 gcry_md_final(md_ctx);
301 hmac_digest = gcry_md_read(md_ctx, 0);
302 hmac_len = 12; /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
303
304 if (do_store) {
305 memcpy(digest, hmac_digest, hmac_len);
306 ret = 0;
307 } else
308 ret = memcmp(digest, hmac_digest, hmac_len);
309
310 gcry_md_close(md_ctx);
311 return ret;
312 }
313
314 /*
315 * Encapsulate a packet in ESP
316 */
317 static void encap_esp_encapsulate(struct sa_block *s)
318 {
319 esp_encap_header_t *eh;
320 unsigned char *iv, *cleartext;
321 size_t i, padding, pad_blksz;
322 unsigned int cleartextlen;
323
324 /*
325 * Add padding as necessary
326 *
327 * done: this should be checked, RFC 2406 section 2.4 is quite
328 * obscure on that point.
329 * seems fine
330 */
331 pad_blksz = s->ipsec.blk_len;
332 while (pad_blksz & 3) /* must be multiple of 4 */
333 pad_blksz <<= 1;
334 padding = pad_blksz - ((s->ipsec.tx.buflen + 2 - s->ipsec.tx.var_header_size - s->ipsec.tx.bufpayload) % pad_blksz);
335 DEBUG(3, printf("sending packet: len = %d, padding = %lu\n", s->ipsec.tx.buflen, (unsigned long)padding));
336 if (padding == pad_blksz)
337 padding = 0;
338
339 for (i = 1; i <= padding; i++) {
340 s->ipsec.tx.buf[s->ipsec.tx.buflen] = i;
341 s->ipsec.tx.buflen++;
342 }
343
344 /* Add trailing padlen and next_header */
345 s->ipsec.tx.buf[s->ipsec.tx.buflen++] = padding;
346 s->ipsec.tx.buf[s->ipsec.tx.buflen++] = IPPROTO_IPIP;
347
348 cleartext = s->ipsec.tx.buf + s->ipsec.tx.var_header_size + s->ipsec.tx.bufpayload;
349 cleartextlen = s->ipsec.tx.buflen - s->ipsec.tx.var_header_size - s->ipsec.tx.bufpayload;
350
351 eh = (esp_encap_header_t *) (s->ipsec.tx.buf + s->ipsec.tx.bufpayload);
352 eh->spi = s->ipsec.tx.spi;
353 eh->seq_id = htonl(s->ipsec.tx.seq_id++);
354
355 /* Copy initialization vector in packet */
356 iv = (unsigned char *)(eh + 1);
357 gcry_create_nonce(iv, s->ipsec.iv_len);
358 hex_dump("iv", iv, s->ipsec.iv_len, NULL);
359
360 hex_dump("sending ESP packet (before crypt)", s->ipsec.tx.buf, s->ipsec.tx.buflen, NULL);
361
362 if (s->ipsec.cry_algo) {
363 gcry_cipher_setiv(s->ipsec.tx.cry_ctx, iv, s->ipsec.iv_len);
364 gcry_cipher_encrypt(s->ipsec.tx.cry_ctx, cleartext, cleartextlen, NULL, 0);
365 }
366
367 hex_dump("sending ESP packet (after crypt)", s->ipsec.tx.buf, s->ipsec.tx.buflen, NULL);
368
369 /* Handle optional authentication field */
370 if (s->ipsec.md_algo) {
371 hmac_compute(s->ipsec.md_algo,
372 s->ipsec.tx.buf + s->ipsec.tx.bufpayload,
373 s->ipsec.tx.var_header_size + cleartextlen,
374 s->ipsec.tx.buf + s->ipsec.tx.bufpayload
375 + s->ipsec.tx.var_header_size + cleartextlen,
376 1, s->ipsec.tx.key_md, s->ipsec.md_len);
377 s->ipsec.tx.buflen += 12; /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
378 hex_dump("sending ESP packet (after ah)", s->ipsec.tx.buf, s->ipsec.tx.buflen, NULL);
379 }
380 }
381
382 /*
383 * Encapsulate a packet in IP ESP and send to the peer.
384 * "buf" should have exactly MAX_HEADER free bytes at its beginning
385 * to account for encapsulation data (not counted in "size").
386 */
387 static void encap_esp_send_peer(struct sa_block *s, unsigned char *buf, unsigned int bufsize)
388 {
389 ssize_t sent;
390 struct ip *tip, ip;
391 struct sockaddr_in dstaddr;
392
393 buf += MAX_HEADER;
394
395 /* Keep a pointer to the old IP header */
396 tip = (struct ip *)buf;
397
398 s->ipsec.tx.buf = buf;
399 s->ipsec.tx.buflen = bufsize;
400
401 /* Prepend our encapsulation header and new IP header */
402 s->ipsec.tx.var_header_size = (s->ipsec.em->fixed_header_size + s->ipsec.iv_len);
403
404 s->ipsec.tx.buf -= sizeof(struct ip) + s->ipsec.tx.var_header_size;
405 s->ipsec.tx.buflen += sizeof(struct ip) + s->ipsec.tx.var_header_size;
406
407 s->ipsec.tx.bufpayload = sizeof(struct ip);
408
409 /* Fill non-mutable fields */
410 ip.ip_v = IPVERSION;
411 ip.ip_hl = 5;
412 /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
413 ip.ip_id = htons(s->ipsec.ip_id++);
414 ip.ip_p = IPPROTO_ESP;
415 ip.ip_src = s->src;
416 ip.ip_dst = s->dst;
417
418 /* Fill mutable fields */
419 ip.ip_tos = (bufsize < sizeof(struct ip)) ? 0 : tip->ip_tos;
420 ip.ip_off = 0;
421 ip.ip_ttl = IPDEFTTL;
422 ip.ip_sum = 0;
423
424 encap_esp_encapsulate(s);
425
426 ip.ip_len = s->ipsec.tx.buflen;
427 #ifdef NEED_IPLEN_FIX
428 ip.ip_len = htons(ip.ip_len);
429 #endif
430 ip.ip_sum = in_cksum((u_short *) s->ipsec.tx.buf, sizeof(struct ip));
431
432 memcpy(s->ipsec.tx.buf, &ip, sizeof ip);
433
434 dstaddr.sin_family = AF_INET;
435 dstaddr.sin_addr = s->dst;
436 dstaddr.sin_port = 0;
437 sent = sendto(s->esp_fd, s->ipsec.tx.buf, s->ipsec.tx.buflen, 0, (struct sockaddr *)&dstaddr, sizeof(struct sockaddr_in));
438 if (sent == -1) {
439 syslog(LOG_ERR, "esp sendto: %m");
440 return;
441 }
442 if (sent != s->ipsec.tx.buflen)
443 syslog(LOG_ALERT, "esp truncated out (%lld out of %d)", (long long)sent, s->ipsec.tx.buflen);
444 }
445
446 /*
447 * Encapsulate a packet in UDP ESP and send to the peer.
448 * "buf" should have exactly MAX_HEADER free bytes at its beginning
449 * to account for encapsulation data (not counted in "size").
450 */
451 static void encap_udp_send_peer(struct sa_block *s, unsigned char *buf, unsigned int bufsize)
452 {
453 ssize_t sent;
454
455 buf += MAX_HEADER;
456
457 s->ipsec.tx.buf = buf;
458 s->ipsec.tx.buflen = bufsize;
459
460 /* Prepend our encapsulation header and new IP header */
461 s->ipsec.tx.var_header_size = (s->ipsec.em->fixed_header_size + s->ipsec.iv_len);
462
463 s->ipsec.tx.buf -= s->ipsec.tx.var_header_size;
464 s->ipsec.tx.buflen += s->ipsec.tx.var_header_size;
465
466 s->ipsec.tx.bufpayload = 0;
467
468 encap_esp_encapsulate(s);
469
470 if (s->ipsec.natt_active_mode == NATT_ACTIVE_DRAFT_OLD) {
471 s->ipsec.tx.buf -= 8;
472 s->ipsec.tx.buflen += 8;
473 memset(s->ipsec.tx.buf, 0, 8);
474 }
475
476 sent = send(s->esp_fd, s->ipsec.tx.buf, s->ipsec.tx.buflen, 0);
477 if (sent == -1) {
478 syslog(LOG_ERR, "udp sendto: %m");
479 return;
480 }
481 if (sent != s->ipsec.tx.buflen)
482 syslog(LOG_ALERT, "udp truncated out (%lld out of %d)",
483 (long long)sent, s->ipsec.tx.buflen);
484 }
485
/*
 * Verify and decrypt the ESP packet currently held in s->ipsec.rx.
 * Checks the (optional) 96-bit HMAC authenticator, decrypts the
 * payload in place, then validates the ESP trailer: pad length,
 * next-header (must be IP-in-IP) and the self-describing padding.
 * Returns 0 on success, -1 on any validation failure.
 */
static int encap_esp_recv_peer(struct sa_block *s)
{
	int len, i;
	size_t blksz;
	unsigned char padlen, next_header;
	unsigned char *pad;
	unsigned char *iv;
	struct esp_encap_header *eh;

	/* eh is unused below; the SPI was already checked by the caller */
	eh = (struct esp_encap_header *)(s->ipsec.rx.buf + s->ipsec.rx.bufpayload);
	s->ipsec.rx.var_header_size = s->ipsec.iv_len;
	iv = s->ipsec.rx.buf + s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size;

	/* length of ciphertext (still including the authenticator, if any) */
	len = s->ipsec.rx.buflen - s->ipsec.rx.bufpayload - s->ipsec.em->fixed_header_size - s->ipsec.rx.var_header_size;

	if (len < 0) {
		syslog(LOG_ALERT, "Packet too short");
		return -1;
	}

	/* Handle optional authentication field */
	if (s->ipsec.md_algo) {
		len -= 12; /*gcry_md_get_algo_dlen(peer->local_sa->md_algo); */
		s->ipsec.rx.buflen -= 12;
		/* HMAC covers ESP header + IV + ciphertext; the received
		 * authenticator sits right after the ciphertext. */
		if (hmac_compute(s->ipsec.md_algo,
				s->ipsec.rx.buf + s->ipsec.rx.bufpayload,
				s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len,
				s->ipsec.rx.buf + s->ipsec.rx.bufpayload
				+ s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len,
				0,
				s->ipsec.rx.key_md,
				s->ipsec.md_len) != 0) {
			syslog(LOG_ALERT, "HMAC mismatch in ESP mode");
			return -1;
		}
	}

	/* ciphertext must be a whole number of cipher blocks */
	blksz = s->ipsec.blk_len;
	if ((len % blksz) != 0) {
		syslog(LOG_ALERT,
			"payload len %d not a multiple of algorithm block size %lu", len,
			(unsigned long)blksz);
		return -1;
	}

	hex_dump("receiving ESP packet (before decrypt)",
		&s->ipsec.rx.buf[s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size +
			s->ipsec.rx.var_header_size], len, NULL);

	if (s->ipsec.cry_algo) {
		unsigned char *data;

		data = (s->ipsec.rx.buf + s->ipsec.rx.bufpayload
			+ s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size);
		gcry_cipher_setiv(s->ipsec.rx.cry_ctx, iv, s->ipsec.iv_len);
		gcry_cipher_decrypt(s->ipsec.rx.cry_ctx, data, len, NULL, 0);
	}

	hex_dump("receiving ESP packet (after decrypt)",
		&s->ipsec.rx.buf[s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size +
			s->ipsec.rx.var_header_size], len, NULL);

	/* ESP trailer layout: ... payload | padding | padlen | next_header */
	padlen = s->ipsec.rx.buf[s->ipsec.rx.bufpayload
		+ s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len - 2];
	next_header = s->ipsec.rx.buf[s->ipsec.rx.bufpayload
		+ s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len - 1];

	if (padlen + 2 > len) {
		syslog(LOG_ALERT, "Inconsistent padlen");
		return -1;
	}
	/* only tunnelled IPv4 (IP-in-IP) is expected from the gateway */
	if (next_header != IPPROTO_IPIP) {
		syslog(LOG_ALERT, "Inconsistent next_header %d", next_header);
		return -1;
	}
	DEBUG(3, printf("pad len: %d, next_header: %d\n", padlen, next_header));

	len -= padlen + 2;
	s->ipsec.rx.buflen -= padlen + 2;

	/* Check padding */
	pad = s->ipsec.rx.buf + s->ipsec.rx.bufpayload
		+ s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len;
	for (i = 1; i <= padlen; i++) {
		if (*pad != i) {
			syslog(LOG_ALERT, "Bad padding");
			return -1;
		}
		pad++;
	}

	return 0;
}
579
580 static void encap_esp_new(struct encap_method *encap)
581 {
582 encap->recv = encap_rawip_recv;
583 encap->send_peer = encap_esp_send_peer;
584 encap->recv_peer = encap_esp_recv_peer;
585 encap->fixed_header_size = sizeof(esp_encap_header_t);
586 }
587
588 static void encap_udp_new(struct encap_method *encap)
589 {
590 encap->recv = encap_udp_recv;
591 encap->send_peer = encap_udp_send_peer;
592 encap->recv_peer = encap_esp_recv_peer;
593 encap->fixed_header_size = sizeof(esp_encap_header_t);
594 }
595
596 /*
597 * Process ARP
598 * Return 1 if packet has been processed, 0 otherwise
599 */
600 static int process_arp(struct sa_block *s, uint8_t *frame)
601 {
602 #ifndef __sun__
603 int frame_size;
604 uint8_t tmp[4];
605 struct ether_header *eth = (struct ether_header *) frame;
606 struct ether_arp *arp = (struct ether_arp *) (frame + ETH_HLEN);
607
608 if (ntohs(eth->ether_type) != ETHERTYPE_ARP) {
609 return 0;
610 }
611
612 if (ntohs(arp->arp_hrd) != ARPHRD_ETHER ||
613 ntohs(arp->arp_pro) != 0x800 ||
614 arp->arp_hln != ETH_ALEN ||
615 arp->arp_pln != 4 ||
616 ntohs(arp->arp_op) != ARPOP_REQUEST ||
617 !memcmp(arp->arp_spa, arp->arp_tpa, 4) ||
618 memcmp(eth->ether_shost, s->tun_hwaddr, ETH_ALEN) ||
619 !memcmp(arp->arp_tpa, s->our_address, 4)) {
620 /* whatever .. just drop it */
621 return 1;
622 }
623
624 /* send arp reply */
625
626 memcpy(eth->ether_dhost, s->tun_hwaddr, ETH_ALEN);
627 eth->ether_shost[0] ^= 0x80; /* Use a different MAC as source */
628
629 memcpy(tmp, arp->arp_spa, 4);
630 memcpy(arp->arp_spa, arp->arp_tpa, 4);
631 memcpy(arp->arp_tpa, tmp, 4);
632
633 memcpy(arp->arp_tha, s->tun_hwaddr, ETH_ALEN);
634 arp->arp_sha[0] ^= 0x80; /* Use a different MAC as source */
635
636 arp->arp_op = htons(ARPOP_REPLY);
637
638 frame_size = ETH_HLEN + sizeof(struct ether_arp);
639 tun_write(s->tun_fd, frame, frame_size);
640 hex_dump("ARP reply", frame, frame_size, NULL);
641
642 return 1;
643 #else
644 s = 0;
645 frame = 0;
646 return 0;
647 #endif
648 }
649
650 /*
651 * Process non-IP packets
652 * Return 1 if packet has been processed, 0 otherwise
653 */
654 static int process_non_ip(struct sa_block *s, uint8_t *frame)
655 {
656 struct ether_header *eth = (struct ether_header *) frame;
657
658 s = NULL; /* unused */
659
660 if (ntohs(eth->ether_type) != ETHERTYPE_IP) {
661 /* drop non-ip traffic */
662 return 1;
663 }
664
665 return 0;
666 }
667
668 static void process_tun(struct sa_block *s)
669 {
670 int pack;
671 int size = MAX_PACKET;
672 uint8_t *start = global_buffer_rx + MAX_HEADER;
673
674 if (opt_if_mode == IF_MODE_TAP) {
675 /* Make sure IP packet starts at buf + MAX_HEADER */
676 start -= ETH_HLEN;
677 size += ETH_HLEN;
678 }
679
680 /* Receive a packet from the tunnel interface */
681 pack = tun_read(s->tun_fd, start, size);
682
683 hex_dump("Rx pkt", start, pack, NULL);
684
685 if (opt_if_mode == IF_MODE_TAP) {
686 if (process_arp(s, start)) {
687 return;
688 }
689 if (process_non_ip(s, start)) {
690 return;
691 }
692 pack -= ETH_HLEN;
693 }
694
695 if (pack == -1) {
696 syslog(LOG_ERR, "read: %m");
697 return;
698 }
699
700 /* Don't access the contents of the buffer other than byte aligned.
701 * 12: Offset of ip source address in ip header,
702 * 4: Length of IP address */
703 if (!memcmp(global_buffer_rx + MAX_HEADER + 12, &s->dst.s_addr, 4)) {
704 syslog(LOG_ALERT, "routing loop to %s",
705 inet_ntoa(s->dst));
706 return;
707 }
708
709 /* Encapsulate and send to the other end of the tunnel */
710 s->ipsec.life.tx += pack;
711 s->ipsec.em->send_peer(s, global_buffer_rx, pack);
712 }
713
/*
 * Receive one encapsulated packet from the network side, check its
 * SPI, authenticate/decrypt it, and deliver the inner packet to the
 * tun device.  A zero SPI marks IKE traffic multiplexed onto the ESP
 * socket (NAT-T), which is diverted to the IKE code instead.
 */
static void process_socket(struct sa_block *s)
{
	/* Receive a packet from a socket */
	int pack;
	uint8_t *start = global_buffer_tx;
	esp_encap_header_t *eh;

	if (opt_if_mode == IF_MODE_TAP) {
		/* leave room for the ethernet header tun_send_ip prepends */
		start += ETH_HLEN;
	}

	pack = s->ipsec.em->recv(s, start, MAX_HEADER + MAX_PACKET);
	if (pack == -1)
		return;

	eh = (esp_encap_header_t *) (s->ipsec.rx.buf + s->ipsec.rx.bufpayload);
	if (eh->spi == 0) {
		/* NAT-T: an all-zero "SPI" field means this is really IKE */
		process_late_ike(s, s->ipsec.rx.buf + s->ipsec.rx.bufpayload + 4 /* SPI-size */,
			s->ipsec.rx.buflen - s->ipsec.rx.bufpayload - 4);
		return;
	} else if (eh->spi != s->ipsec.rx.spi) {
		syslog(LOG_NOTICE, "unknown spi %#08x from peer", ntohl(eh->spi));
		return;
	}

	/* Check auth digest and/or decrypt */
	if (s->ipsec.em->recv_peer(s) != 0)
		return;

	if (encap_any_decap(s) == 0) {
		syslog(LOG_DEBUG, "received update probe from peer");
	} else {
		/* Send the decapsulated packet to the tunnel interface */
		s->ipsec.life.rx += s->ipsec.rx.buflen;
		tun_send_ip(s);
	}
}
751
#if defined(__CYGWIN__)
/* Cygwin cannot select() on the tun handle, so the tun side runs in a
 * dedicated thread, pumping packets until shutdown is requested. */
static void *tun_thread (void *arg)
{
	struct sa_block *const s = arg;

	while (!do_kill)
		process_tun(s);

	return NULL;
}
#endif
763
/*
 * Main packet pump: multiplexes the tun device, the ESP socket and
 * (if separate) the IKE socket via select(), and schedules periodic
 * NAT-T keepalives and DPD (dead peer detection) requests.  Runs
 * until do_kill is set by a signal handler or by DPD/peer teardown.
 */
static void vpnc_main_loop(struct sa_block *s)
{
	fd_set rfds, refds;
	int nfds=0;
	int enable_keepalives;
	int timed_mode;
	ssize_t len;
	struct timeval select_timeout;
	struct timeval normal_timeout;
	time_t next_ike_keepalive=0;
	time_t next_ike_dpd=0;
#if defined(__CYGWIN__)
	pthread_t tid;
#endif

	/* non-esp marker, nat keepalive payload (0xFF) */
	uint8_t keepalive_v2[5] = { 0x00, 0x00, 0x00, 0x00, 0xFF };
	uint8_t keepalive_v1[1] = { 0xFF };
	uint8_t *keepalive;
	size_t keepalive_size;

	/* old NAT-T draft uses the bare 0xFF keepalive; newer modes need
	 * the 4-byte non-ESP marker in front */
	if (s->ipsec.natt_active_mode == NATT_ACTIVE_DRAFT_OLD) {
		keepalive = keepalive_v1;
		keepalive_size = sizeof(keepalive_v1);
	} else { /* active_mode is either RFC or CISCO_UDP */
		keepalive = keepalive_v2;
		keepalive_size = sizeof(keepalive_v2);
	}

	/* send keepalives if UDP encapsulation is enabled */
	enable_keepalives = (s->ipsec.encap_mode != IPSEC_ENCAP_TUNNEL);

	/* regular wakeups if keepalives on ike or dpd active */
	timed_mode = ((enable_keepalives && s->ike_fd != s->esp_fd) || s->ike.do_dpd);

	FD_ZERO(&rfds);

#if !defined(__CYGWIN__)
	/* on Cygwin the tun fd is serviced by a dedicated thread instead */
	FD_SET(s->tun_fd, &rfds);
	nfds = MAX(nfds, s->tun_fd +1);
#endif

	FD_SET(s->esp_fd, &rfds);
	nfds = MAX(nfds, s->esp_fd +1);

	if (s->ike_fd != s->esp_fd) {
		FD_SET(s->ike_fd, &rfds);
		nfds = MAX(nfds, s->ike_fd +1);
	}

#if defined(__CYGWIN__)
	if (pthread_create(&tid, NULL, tun_thread, s)) {
		syslog(LOG_ERR, "Cannot create tun thread!\n");
		return;
	}
#endif

	/* default: wake up once a day if nothing else is scheduled */
	normal_timeout.tv_sec = 86400;
	normal_timeout.tv_usec = 0;

	if (s->ike.do_dpd) {
		/* send initial dpd request */
		next_ike_dpd = time(NULL) + s->ike.dpd_idle;
		dpd_ike(s);
		normal_timeout.tv_sec = s->ike.dpd_idle;
		normal_timeout.tv_usec = 0;
	}

	if (enable_keepalives) {
		/* NAT mappings expire quickly; refresh every ~9.5 seconds */
		normal_timeout.tv_sec = 9;
		normal_timeout.tv_usec = 500000;

		if (s->ike_fd != s->esp_fd) {
			/* send initial nat ike keepalive packet */
			next_ike_keepalive = time(NULL) + 9;
			keepalive_ike(s);
		}
	}

	select_timeout = normal_timeout;

	while (!do_kill) {
		int presult;

		/* inner loop: keep select()ing through timeouts (which only
		 * trigger keepalive/DPD housekeeping) and EINTR */
		do {
			struct timeval *tvp = NULL;
			FD_COPY(&rfds, &refds);
			if (s->ike.do_dpd || enable_keepalives)
				tvp = &select_timeout;
			presult = select(nfds, &refds, NULL, NULL, tvp);
			if (presult == 0 && (s->ike.do_dpd || enable_keepalives)) {
				/* reset to max timeout */
				select_timeout = normal_timeout;
				if (enable_keepalives) {
					if (s->ike_fd != s->esp_fd) {
						/* send nat ike keepalive packet */
						next_ike_keepalive = time(NULL) + 9;
						keepalive_ike(s);
					}
					/* send nat keepalive packet */
					if (send(s->esp_fd, keepalive, keepalive_size, 0) == -1) {
						syslog(LOG_ERR, "keepalive sendto: %m");
					}
				}
				if (s->ike.do_dpd) {
					time_t now = time(NULL);
					if (s->ike.dpd_seqno != s->ike.dpd_seqno_ack) {
						/* Wake up more often for dpd attempts */
						select_timeout.tv_sec = 5;
						select_timeout.tv_usec = 0;
						dpd_ike(s);
						next_ike_dpd = now + s->ike.dpd_idle;
					}
					else if (now >= next_ike_dpd) {
						dpd_ike(s);
						next_ike_dpd = now + s->ike.dpd_idle;
					}
				}
			}
			DEBUG(2,printf("lifetime status: %ld of %u seconds used, %u|%u of %u kbytes used\n",
				time(NULL) - s->ipsec.life.start,
				s->ipsec.life.seconds,
				s->ipsec.life.rx/1024,
				s->ipsec.life.tx/1024,
				s->ipsec.life.kbytes));
		} while ((presult == 0 || (presult == -1 && errno == EINTR)) && !do_kill);
		if (presult == -1) {
			syslog(LOG_ERR, "select: %m");
			continue;
		}

#if !defined(__CYGWIN__)
		if (FD_ISSET(s->tun_fd, &refds)) {
			process_tun(s);
		}
#endif

		if (FD_ISSET(s->esp_fd, &refds) ) {
			process_socket(s);
		}

		if (s->ike_fd != s->esp_fd && FD_ISSET(s->ike_fd, &refds) ) {
			DEBUG(3,printf("received something on ike fd..\n"));
			len = recv(s->ike_fd, global_buffer_tx, MAX_HEADER + MAX_PACKET, 0);
			process_late_ike(s, global_buffer_tx, len);
		}

		/* after handling traffic, recompute how soon we must wake up */
		if (timed_mode) {
			time_t now = time(NULL);
			time_t next_up = now + 86400;
			if (enable_keepalives) {
				/* never wait more than 9 seconds for a UDP keepalive */
				next_up = now + 9;
				if (s->ike_fd != s->esp_fd) {
					if (now >= next_ike_keepalive) {
						/* send nat ike keepalive packet now */
						next_ike_keepalive = now + 9;
						keepalive_ike(s);
						select_timeout = normal_timeout;
					}
					if (next_ike_keepalive < next_up)
						next_up = next_ike_keepalive;
				}
			}
			if (s->ike.do_dpd) {
				if (s->ike.dpd_seqno != s->ike.dpd_seqno_ack) {
					/* unacknowledged DPD: retry within 5 seconds */
					dpd_ike(s);
					next_ike_dpd = now + s->ike.dpd_idle;
					if (now + 5 < next_up)
						next_up = now + 5;
				}
				else if (now >= next_ike_dpd) {
					dpd_ike(s);
					next_ike_dpd = now + s->ike.dpd_idle;
				}
				if (next_ike_dpd < next_up)
					next_up = next_ike_dpd;
			}
			/* Reduce timeout so next activity happens on schedule */
			select_timeout.tv_sec = next_up - now;
			select_timeout.tv_usec = 0;
		}

	}

	/* do_kill < 0 encodes internal shutdown causes; > 0 is a signal */
	switch (do_kill) {
	case -2:
		syslog(LOG_NOTICE, "connection terminated by dead peer detection");
		break;
	case -1:
		syslog(LOG_NOTICE, "connection terminated by peer");
		break;
	default:
		syslog(LOG_NOTICE, "terminated by signal: %d", do_kill);
		break;
	}
}
961
/* Signal handler: record the signal number so the main loop terminates. */
static void killit(int signum)
{
	do_kill = signum;
}
966
/*
 * Write our PID (one decimal number plus newline) into "path".
 * A NULL or empty path disables the pidfile; failure to open the
 * file is logged but never fatal.
 */
static void write_pidfile(const char *path)
{
	FILE *fp;

	/* an unset or empty path means "no pidfile requested" */
	if (!path || !*path)
		return;

	fp = fopen(path, "w");
	if (!fp) {
		syslog(LOG_WARNING, "can't open pidfile %s for writing", path);
		return;
	}

	fprintf(fp, "%d\n", (int)getpid());
	fclose(fp);
}
983
/*
 * Entry point for the data phase: select the encapsulation method,
 * derive the rx/tx cipher and HMAC contexts from the negotiated SA
 * keys, install signal handlers, optionally daemonize, then run the
 * main packet loop until shutdown.
 */
void vpnc_doit(struct sa_block *s)
{
	struct sigaction act;
	/* stack-local is fine: s->ipsec.em is only used while this frame
	 * is alive (vpnc_main_loop runs within it) */
	struct encap_method meth;

	const char *pidfile = config[CONFIG_PID_FILE];

	switch (s->ipsec.encap_mode) {
	case IPSEC_ENCAP_TUNNEL:
		encap_esp_new(&meth);
		/* randomize the starting outer IP identification value */
		gcry_create_nonce(&s->ipsec.ip_id, sizeof(uint16_t));
		break;
	case IPSEC_ENCAP_UDP_TUNNEL:
	case IPSEC_ENCAP_UDP_TUNNEL_OLD:
		encap_udp_new(&meth);
		break;
	default:
		abort();
	}
	s->ipsec.em = &meth;

	/* key material layout: cipher key first, HMAC key right after it */
	s->ipsec.rx.key_cry = s->ipsec.rx.key;
	hex_dump("rx.key_cry", s->ipsec.rx.key_cry, s->ipsec.key_len, NULL);

	s->ipsec.rx.key_md = s->ipsec.rx.key + s->ipsec.key_len;
	hex_dump("rx.key_md", s->ipsec.rx.key_md, s->ipsec.md_len, NULL);

	if (s->ipsec.cry_algo) {
		gcry_cipher_open(&s->ipsec.rx.cry_ctx, s->ipsec.cry_algo, GCRY_CIPHER_MODE_CBC, 0);
		gcry_cipher_setkey(s->ipsec.rx.cry_ctx, s->ipsec.rx.key_cry, s->ipsec.key_len);
	} else {
		s->ipsec.rx.cry_ctx = NULL;	/* NULL cipher negotiated */
	}

	s->ipsec.tx.key_cry = s->ipsec.tx.key;
	hex_dump("tx.key_cry", s->ipsec.tx.key_cry, s->ipsec.key_len, NULL);

	s->ipsec.tx.key_md = s->ipsec.tx.key + s->ipsec.key_len;
	hex_dump("tx.key_md", s->ipsec.tx.key_md, s->ipsec.md_len, NULL);

	if (s->ipsec.cry_algo) {
		gcry_cipher_open(&s->ipsec.tx.cry_ctx, s->ipsec.cry_algo, GCRY_CIPHER_MODE_CBC, 0);
		gcry_cipher_setkey(s->ipsec.tx.cry_ctx, s->ipsec.tx.key_cry, s->ipsec.key_len);
	} else {
		s->ipsec.tx.cry_ctx = NULL;
	}

	DEBUG(2, printf("remote -> local spi: %#08x\n", ntohl(s->ipsec.rx.spi)));
	DEBUG(2, printf("local -> remote spi: %#08x\n", ntohl(s->ipsec.tx.spi)));

	do_kill = 0;

	/* don't steal SIGHUP if something else already installed a handler */
	sigaction(SIGHUP, NULL, &act);
	if (act.sa_handler == SIG_DFL)
		signal(SIGHUP, killit);

	signal(SIGINT, killit);
	signal(SIGTERM, killit);

	chdir("/");

	if (!opt_nd) {
		/* daemonize: detach stdio onto /dev/null, new session */
		pid_t pid;
		if ((pid = fork()) < 0) {
			fprintf(stderr, "Warning, could not fork the child process!\n");
		} else if (pid == 0) {
			close(0); open("/dev/null", O_RDONLY, 0666);
			close(1); open("/dev/null", O_WRONLY, 0666);
			close(2); open("/dev/null", O_WRONLY, 0666);
			setsid();
		} else {
			printf("VPNC started in background (pid: %d)...\n", (int)pid);
			exit(0);
		}
	} else {
		printf("VPNC started in foreground...\n");
	}
	openlog("vpnc", LOG_PID | LOG_PERROR, LOG_DAEMON);
	write_pidfile(pidfile);

	vpnc_main_loop(s);

	if (pidfile)
		unlink(pidfile); /* ignore errors */
}