6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
/* Netlink socket for xfrm events; its definition is elided from this excerpt. */
24 EXPORT_SYMBOL(xfrm_nl);
/* Default values for the xfrm async-event sysctls: the event timer interval
 * (etime) and the replay sequence-number threshold (rseqth). */
26 u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
27 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
29 u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
30 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
32 /* Each xfrm_state may be linked to two tables:
34 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
35 2. Hash table by daddr to find what SAs exist for given
36 destination/tunnel endpoint. (output)
/* Protects the state hash tables (bydst/bysrc/byspi) below. */
39 static DEFINE_SPINLOCK(xfrm_state_lock);
41 /* Hash table to find appropriate SA towards given target (endpoint
42  * of tunnel or destination of transport mode) allowed by selector.
44  * Main use is finding SA after policy selected tunnel or transport mode.
45  * Also, it can be used by ah/esp icmp error handler to find offending SA.
/* Three hash chains per state: by destination, by source, and by SPI. */
47 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
48 static struct list_head xfrm_state_bysrc[XFRM_DST_HSIZE];
49 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
/* Wait queue that key managers sleep on; exported for them. */
51 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
52 EXPORT_SYMBOL(km_waitq);
/* Per-family (AF_INET/AF_INET6) operation tables, guarded by an rwlock. */
54 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
55 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
/* Deferred-destruction machinery: dead states are queued on
 * xfrm_state_gc_list and freed from the xfrm_state_gc_work workqueue. */
57 static struct work_struct xfrm_state_gc_work;
58 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
59 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
/* Set when the GC pass should also flush dst bundles (see __xfrm_state_delete). */
61 static int xfrm_state_gc_flush_bundles;
/* Forward declarations for functions defined later in this file. */
63 int __xfrm_state_delete(struct xfrm_state *x);
65 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
66 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
68 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
69 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
/*
 * Final teardown of a dead state: cancel both timers, drop the mode and
 * type module references, run the type destructor, and free LSM state.
 * NOTE(review): excerpt — the embedded numbering (71..87) shows elided lines.
 */
71 static void xfrm_state_gc_destroy(struct xfrm_state *x)
73 	if (del_timer(&x->timer))
75 	if (del_timer(&x->rtimer))
82 		xfrm_put_mode(x->mode);
84 		x->type->destructor(x);
85 		xfrm_put_type(x->type);
87 	security_xfrm_state_free(x);
/*
 * Workqueue handler: splice the pending GC list onto a private list under
 * the GC lock, then destroy each queued state outside the lock.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
91 static void xfrm_state_gc_task(void *data)
94 	struct list_head *entry, *tmp;
95 	struct list_head gc_list = LIST_HEAD_INIT(gc_list);
/* A prior __xfrm_state_delete() asked us to flush dst bundles too. */
97 	if (xfrm_state_gc_flush_bundles) {
98 		xfrm_state_gc_flush_bundles = 0;
102 	spin_lock_bh(&xfrm_state_gc_lock);
103 	list_splice_init(&xfrm_state_gc_list, &gc_list);
104 	spin_unlock_bh(&xfrm_state_gc_lock);
/* Dead states are chained through their (now unused) bydst link. */
106 	list_for_each_safe(entry, tmp, &gc_list) {
107 		x = list_entry(entry, struct xfrm_state, bydst);
108 		xfrm_state_gc_destroy(x);
/* Convert seconds to jiffies, clamped just below MAX_SCHEDULE_TIMEOUT
 * to avoid overflowing the timer arithmetic. (Else-branch elided here.) */
113 static inline unsigned long make_jiffies(long secs)
115 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
116 		return MAX_SCHEDULE_TIMEOUT-1;
/*
 * Per-state lifetime timer.  Checks the hard and soft add/use expiry
 * deadlines against wall-clock seconds (xtime), fires soft-expire
 * notifications, re-arms the timer for the nearest future deadline, and
 * hard-deletes the state when a hard limit has passed.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
121 static void xfrm_timer_handler(unsigned long data)
123 	struct xfrm_state *x = (struct xfrm_state*)data;
124 	unsigned long now = (unsigned long)xtime.tv_sec;
125 	long next = LONG_MAX;	/* seconds until the nearest pending deadline */
129 	if (x->km.state == XFRM_STATE_DEAD)
131 	if (x->km.state == XFRM_STATE_EXPIRED)
/* Hard add-time limit: counted from when the state was created. */
133 	if (x->lft.hard_add_expires_seconds) {
134 		long tmo = x->lft.hard_add_expires_seconds +
135 			x->curlft.add_time - now;
/* Hard use-time limit: from first use, or from now if never used. */
141 	if (x->lft.hard_use_expires_seconds) {
142 		long tmo = x->lft.hard_use_expires_seconds +
143 			(x->curlft.use_time ? : now) - now;
/* Soft limits mirror the hard ones but only warn the key manager. */
151 	if (x->lft.soft_add_expires_seconds) {
152 		long tmo = x->lft.soft_add_expires_seconds +
153 			x->curlft.add_time - now;
159 	if (x->lft.soft_use_expires_seconds) {
160 		long tmo = x->lft.soft_use_expires_seconds +
161 			(x->curlft.use_time ? : now) - now;
/* Soft expiry: hard=0 tells the KM this is only a warning. */
170 		km_state_expired(x, 0, 0);
/* Re-arm for the nearest deadline still in the future. */
172 	if (next != LONG_MAX &&
173 	    !mod_timer(&x->timer, jiffies + make_jiffies(next)))
/* An ACQ state that never got an SPI simply expires silently. */
178 	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
179 		x->km.state = XFRM_STATE_EXPIRED;
/* Hard expiry: delete and, for real (SPI-bearing) states, notify KM. */
184 	if (!__xfrm_state_delete(x) && x->id.spi)
185 		km_state_expired(x, 1, 0);
188 	spin_unlock(&x->lock);
/* Defined below; needed here to wire up x->rtimer. */
192 static void xfrm_replay_timer_handler(unsigned long data);
/*
 * Allocate and minimally initialize a new xfrm_state: refcount of 1,
 * empty hash links, lifetime and replay timers wired to this state,
 * creation time stamped from wall clock, and soft/hard byte/packet
 * limits defaulted to "infinite" (XFRM_INF).  Returns NULL on OOM
 * (GFP_ATOMIC allocation).
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
194 struct xfrm_state *xfrm_state_alloc(void)
196 	struct xfrm_state *x;
198 	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
201 		atomic_set(&x->refcnt, 1);
202 		atomic_set(&x->tunnel_users, 0);
203 		INIT_LIST_HEAD(&x->bydst);
204 		INIT_LIST_HEAD(&x->bysrc);
205 		INIT_LIST_HEAD(&x->byspi);
206 		init_timer(&x->timer);
207 		x->timer.function = xfrm_timer_handler;
208 		x->timer.data = (unsigned long)x;
209 		init_timer(&x->rtimer);
210 		x->rtimer.function = xfrm_replay_timer_handler;
211 		x->rtimer.data = (unsigned long)x;
212 		x->curlft.add_time = (unsigned long)xtime.tv_sec;
213 		x->lft.soft_byte_limit = XFRM_INF;
214 		x->lft.soft_packet_limit = XFRM_INF;
215 		x->lft.hard_byte_limit = XFRM_INF;
216 		x->lft.hard_packet_limit = XFRM_INF;
217 		x->replay_maxage = 0;
218 		x->replay_maxdiff = 0;
219 		spin_lock_init(&x->lock);
223 EXPORT_SYMBOL(xfrm_state_alloc);
/*
 * Called when the last reference is dropped.  The state must already be
 * DEAD; it is queued (reusing its bydst link) for deferred destruction
 * on the GC workqueue rather than freed inline.
 */
225 void __xfrm_state_destroy(struct xfrm_state *x)
227 	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
229 	spin_lock_bh(&xfrm_state_gc_lock);
230 	list_add(&x->bydst, &xfrm_state_gc_list);
231 	spin_unlock_bh(&xfrm_state_gc_lock);
232 	schedule_work(&xfrm_state_gc_work);
234 EXPORT_SYMBOL(__xfrm_state_destroy);
/*
 * Mark @x dead and unlink it from the state tables (caller holds
 * x->lock).  Cancels both timers, schedules a bundle flush via the GC
 * task if extra references (DSTs) remain, and drops the reference that
 * xfrm_state_alloc() originally gave.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
236 int __xfrm_state_delete(struct xfrm_state *x)
240 	if (x->km.state != XFRM_STATE_DEAD) {
241 		x->km.state = XFRM_STATE_DEAD;
242 		spin_lock(&xfrm_state_lock);
251 		spin_unlock(&xfrm_state_lock);
252 		if (del_timer(&x->timer))
254 		if (del_timer(&x->rtimer))
257 		/* The number two in this test is the reference
258 		 * mentioned in the comment below plus the reference
259 		 * our caller holds. A larger value means that
260 		 * there are DSTs attached to this xfrm_state.
262 		if (atomic_read(&x->refcnt) > 2) {
263 			xfrm_state_gc_flush_bundles = 1;
264 			schedule_work(&xfrm_state_gc_work);
267 		/* All xfrm_state objects are created by xfrm_state_alloc.
268 		 * The xfrm_state_alloc call gives a reference, and that
269 		 * is what we are dropping here.
277 EXPORT_SYMBOL(__xfrm_state_delete);
/* Locked wrapper: take x->lock and delete the state via
 * __xfrm_state_delete(), returning its result. */
279 int xfrm_state_delete(struct xfrm_state *x)
283 	spin_lock_bh(&x->lock);
284 	err = __xfrm_state_delete(x);
285 	spin_unlock_bh(&x->lock);
289 EXPORT_SYMBOL(xfrm_state_delete);
/*
 * Delete every non-kernel-internal state whose protocol matches @proto.
 * The table lock is dropped around each xfrm_state_delete() call (which
 * takes x->lock itself), so the hash walk restarts after each deletion.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
291 void xfrm_state_flush(u8 proto)
294 	struct xfrm_state *x;
296 	spin_lock_bh(&xfrm_state_lock);
297 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
299 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
300 			if (!xfrm_state_kern(x) &&
301 			    xfrm_id_proto_match(x->id.proto, proto)) {
303 				spin_unlock_bh(&xfrm_state_lock);
305 				xfrm_state_delete(x);
308 				spin_lock_bh(&xfrm_state_lock);
313 	spin_unlock_bh(&xfrm_state_lock);
316 EXPORT_SYMBOL(xfrm_state_flush);
/* Fill in a temporary selector on @x for the flow/template pair by
 * delegating to the address family's init_tempsel hook. */
319 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
320 		  struct xfrm_tmpl *tmpl,
321 		  xfrm_address_t *daddr, xfrm_address_t *saddr,
322 		  unsigned short family)
324 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
327 	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
328 	xfrm_state_put_afinfo(afinfo);
/*
 * Resolve an SA for a flow: search the bydst hash chain for a state
 * matching the template (family, reqid, addresses, mode, proto, SPI)
 * and pick the "best" VALID one; if none is found and no acquisition is
 * in flight, create an ACQ placeholder state, query the key managers,
 * and arm its acquire-expiry timer.  *err reports -EAGAIN while an
 * acquisition is in progress.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
333 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
334 		struct flowi *fl, struct xfrm_tmpl *tmpl,
335 		struct xfrm_policy *pol, int *err,
336 		unsigned short family)
338 	unsigned h = xfrm_dst_hash(daddr, family);
339 	struct xfrm_state *x, *x0;
340 	int acquire_in_progress = 0;
342 	struct xfrm_state *best = NULL;
343 	struct xfrm_state_afinfo *afinfo;
345 	afinfo = xfrm_state_get_afinfo(family);
346 	if (afinfo == NULL) {
347 		*err = -EAFNOSUPPORT;
351 	spin_lock_bh(&xfrm_state_lock);
/* Pass 1: scan existing states for a template match. */
352 	list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
353 		if (x->props.family == family &&
354 		    x->props.reqid == tmpl->reqid &&
355 		    xfrm_state_addr_check(x, daddr, saddr, family) &&
356 		    tmpl->mode == x->props.mode &&
357 		    tmpl->id.proto == x->id.proto &&
358 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
360 			   1. There is a valid state with matching selector.
362 			   2. Valid state with inappropriate selector. Skip.
364 			   Entering area of "sysdeps".
366 			   3. If state is not valid, selector is temporary,
367 			      it selects only session which triggered
368 			      previous resolution. Key manager will do
369 			      something to install a state with proper
372 			if (x->km.state == XFRM_STATE_VALID) {
373 				if (!xfrm_selector_match(&x->sel, fl, family) ||
374 				    !security_xfrm_state_pol_flow_match(x, pol, fl))
/* Prefer a non-dying state; among equals, the most recently added. */
377 				    best->km.dying > x->km.dying ||
378 				    (best->km.dying == x->km.dying &&
379 				     best->curlft.add_time < x->curlft.add_time))
381 			} else if (x->km.state == XFRM_STATE_ACQ) {
382 				acquire_in_progress = 1;
383 			} else if (x->km.state == XFRM_STATE_ERROR ||
384 				   x->km.state == XFRM_STATE_EXPIRED) {
385 				if (xfrm_selector_match(&x->sel, fl, family) &&
386 				    security_xfrm_state_pol_flow_match(x, pol, fl))
/* Pass 2: nothing usable — try SPI lookup, then start an acquire. */
393 	if (!x && !error && !acquire_in_progress) {
395 		    (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
396 		                               tmpl->id.proto)) != NULL) {
401 		x = xfrm_state_alloc();
406 		/* Initialize temporary selector matching only
407 		 * to current session. */
408 		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
410 		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
412 			x->km.state = XFRM_STATE_DEAD;
/* Ask the registered key managers to negotiate a real SA. */
418 		if (km_query(x, tmpl, pol) == 0) {
419 			x->km.state = XFRM_STATE_ACQ;
420 			list_add_tail(&x->bydst, xfrm_state_bydst+h);
422 			list_add_tail(&x->bysrc, xfrm_state_bysrc+h);
425 				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
426 				list_add(&x->byspi, xfrm_state_byspi+h);
/* The placeholder self-destructs if no SA arrives in time. */
429 			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
431 			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
432 			add_timer(&x->timer);
434 			x->km.state = XFRM_STATE_DEAD;
444 	*err = acquire_in_progress ? -EAGAIN : error;
445 	spin_unlock_bh(&xfrm_state_lock);
446 	xfrm_state_put_afinfo(afinfo);
/*
 * Link @x into the bydst and bysrc hashes and, for SPI-bearing
 * protocols, the byspi hash; then arm the lifetime timer (one tick from
 * now) and, if configured, the replay-notification timer.  Caller holds
 * xfrm_state_lock.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
450 static void __xfrm_state_insert(struct xfrm_state *x)
452 	unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
454 	list_add(&x->bydst, xfrm_state_bydst+h);
457 	h = xfrm_src_hash(&x->props.saddr, x->props.family);
459 	list_add(&x->bysrc, xfrm_state_bysrc+h);
462 	if (xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY)) {
463 		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
466 		list_add(&x->byspi, xfrm_state_byspi+h);
470 	if (!mod_timer(&x->timer, jiffies + HZ))
473 	if (x->replay_maxage &&
474 	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
/* Public insert: hash @x under the table lock, then flush cached dst
 * bundles so new traffic re-resolves against the new state. */
480 void xfrm_state_insert(struct xfrm_state *x)
482 	spin_lock_bh(&xfrm_state_lock);
483 	__xfrm_state_insert(x);
484 	spin_unlock_bh(&xfrm_state_lock);
486 	xfrm_flush_all_bundles();
488 EXPORT_SYMBOL(xfrm_state_insert);
/* Find an existing state equivalent to @x: by (daddr, SPI, proto) when
 * the protocol carries an SPI, otherwise by address pair and proto. */
490 static inline struct xfrm_state *
491 __xfrm_state_locate(struct xfrm_state_afinfo *afinfo, struct xfrm_state *x,
495 		return afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
497 		return afinfo->state_lookup_byaddr(&x->id.daddr, &x->props.saddr, x->id.proto);
/* Defined later; used by xfrm_state_add() below. */
500 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/*
 * Insert a fully-specified state.  Fails if an equivalent state already
 * exists; otherwise replaces any matching ACQ placeholder (located by
 * KM sequence number or by find_acq), inserts @x, flushes bundles, and
 * deletes the displaced placeholder.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
502 int xfrm_state_add(struct xfrm_state *x)
504 	struct xfrm_state_afinfo *afinfo;
505 	struct xfrm_state *x1;
508 	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
510 	family = x->props.family;
511 	afinfo = xfrm_state_get_afinfo(family);
512 	if (unlikely(afinfo == NULL))
513 		return -EAFNOSUPPORT;
515 	spin_lock_bh(&xfrm_state_lock);
/* Refuse to add a duplicate of an existing state. */
517 	x1 = __xfrm_state_locate(afinfo, x, use_spi);
/* Look for the ACQ placeholder this SA answers (matched by km.seq). */
525 	if (use_spi && x->km.seq) {
526 		x1 = __xfrm_find_acq_byseq(x->km.seq);
527 		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
534 		x1 = afinfo->find_acq(
535 			x->props.mode, x->props.reqid, x->id.proto,
536 			&x->id.daddr, &x->props.saddr, 0);
538 	__xfrm_state_insert(x);
542 	spin_unlock_bh(&xfrm_state_lock);
543 	xfrm_state_put_afinfo(afinfo);
546 		xfrm_flush_all_bundles();
/* Retire the placeholder we displaced. */
549 		xfrm_state_delete(x1);
555 EXPORT_SYMBOL(xfrm_state_add);
/*
 * Update an existing state in place.  Locates the current instance; a
 * kernel-internal state cannot be updated, an ACQ placeholder is simply
 * replaced by inserting @x (and deleting the placeholder), and a VALID
 * state gets its encap and lifetime data copied over with its timers
 * refreshed.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
557 int xfrm_state_update(struct xfrm_state *x)
559 	struct xfrm_state_afinfo *afinfo;
560 	struct xfrm_state *x1;
562 	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
564 	afinfo = xfrm_state_get_afinfo(x->props.family);
565 	if (unlikely(afinfo == NULL))
566 		return -EAFNOSUPPORT;
568 	spin_lock_bh(&xfrm_state_lock);
569 	x1 = __xfrm_state_locate(afinfo, x, use_spi);
/* States owned by the kernel itself may not be replaced from userspace. */
575 	if (xfrm_state_kern(x1)) {
/* An ACQ placeholder: just install the real state in its place. */
581 	if (x1->km.state == XFRM_STATE_ACQ) {
582 		__xfrm_state_insert(x);
588 	spin_unlock_bh(&xfrm_state_lock);
589 	xfrm_state_put_afinfo(afinfo);
595 		xfrm_state_delete(x1);
/* In-place update path: copy encap and lifetimes under x1's own lock. */
601 	spin_lock_bh(&x1->lock);
602 	if (likely(x1->km.state == XFRM_STATE_VALID)) {
603 		if (x->encap && x1->encap)
604 			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
605 		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
608 		if (!mod_timer(&x1->timer, jiffies + HZ))
610 		if (x1->curlft.use_time)
611 			xfrm_state_check_expire(x1);
615 	spin_unlock_bh(&x1->lock);
621 EXPORT_SYMBOL(xfrm_state_update);
/*
 * Byte/packet-count lifetime check, run on use.  Stamps first-use time,
 * hard-expires the state (via an immediate timer) when hard limits are
 * reached, and sends a soft-expire notification at the soft limits.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
623 int xfrm_state_check_expire(struct xfrm_state *x)
625 	if (!x->curlft.use_time)
626 		x->curlft.use_time = (unsigned long)xtime.tv_sec;
628 	if (x->km.state != XFRM_STATE_VALID)
631 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
632 	    x->curlft.packets >= x->lft.hard_packet_limit) {
633 		x->km.state = XFRM_STATE_EXPIRED;
/* Fire the lifetime timer right away to perform the hard expiry. */
634 		if (!mod_timer(&x->timer, jiffies))
640 	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
641 	     x->curlft.packets >= x->lft.soft_packet_limit)) {
643 		km_state_expired(x, 0, 0);
647 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Ensure @skb has enough headroom for this state's transform header
 * plus the output device's link-layer needs, expanding if required. */
649 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
651 	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
655 		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
657 	/* Check tail too... */
/* Output-path check: verify the state has not expired, then make sure
 * the skb has room for the transform header. */
661 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
663 	int err = xfrm_state_check_expire(x);
666 	err = xfrm_state_check_space(x, skb);
670 EXPORT_SYMBOL(xfrm_state_check);
/* Look up a state by (daddr, SPI, proto) through the per-family hook,
 * under the state table lock. */
673 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
674 		  unsigned short family)
676 	struct xfrm_state *x;
677 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
681 	spin_lock_bh(&xfrm_state_lock);
682 	x = afinfo->state_lookup(daddr, spi, proto);
683 	spin_unlock_bh(&xfrm_state_lock);
684 	xfrm_state_put_afinfo(afinfo);
687 EXPORT_SYMBOL(xfrm_state_lookup);
/* Look up a state by (daddr, saddr, proto) — for protocols without an
 * SPI — through the per-family hook, under the state table lock. */
690 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
691 			 u8 proto, unsigned short family)
693 	struct xfrm_state *x;
694 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
698 	spin_lock_bh(&xfrm_state_lock);
699 	x = afinfo->state_lookup_byaddr(daddr, saddr, proto);
700 	spin_unlock_bh(&xfrm_state_lock);
701 	xfrm_state_put_afinfo(afinfo);
704 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
/* Find (or, when @create is set, create) an ACQ placeholder state for
 * the given mode/reqid/proto/address pair via the per-family hook. */
707 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
708 	      xfrm_address_t *daddr, xfrm_address_t *saddr,
709 	      int create, unsigned short family)
711 	struct xfrm_state *x;
712 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
716 	spin_lock_bh(&xfrm_state_lock);
717 	x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
718 	spin_unlock_bh(&xfrm_state_lock);
719 	xfrm_state_put_afinfo(afinfo);
722 EXPORT_SYMBOL(xfrm_find_acq);
724 /* Silly enough, but I'm lazy to build resolution list */
/* Linear scan of every bydst chain for the ACQ state whose key-manager
 * sequence number equals @seq.  Caller holds xfrm_state_lock. */
726 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
729 	struct xfrm_state *x;
731 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
732 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
733 			if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper around __xfrm_find_acq_byseq(). */
742 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
744 	struct xfrm_state *x;
746 	spin_lock_bh(&xfrm_state_lock);
747 	x = __xfrm_find_acq_byseq(seq);
748 	spin_unlock_bh(&xfrm_state_lock);
751 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Hand out the next acquire sequence number; the double-increment idiom
 * skips 0 on wraparound so 0 never escapes as a valid sequence. */
753 u32 xfrm_get_acqseq(void)
757 	static DEFINE_SPINLOCK(acqseq_lock);
759 	spin_lock_bh(&acqseq_lock);
760 	res = (++acqseq ? : ++acqseq);
761 	spin_unlock_bh(&acqseq_lock);
764 EXPORT_SYMBOL(xfrm_get_acqseq);
/*
 * Assign an SPI to @x from [minspi, maxspi]: if the range is a single
 * value, take it iff unused; otherwise probe random values in the range
 * until a free one is found.  On success the state is hashed into the
 * byspi table.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
767 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
770 	struct xfrm_state *x0;
775 	if (minspi == maxspi) {
776 		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
/* Range probing is done in host byte order; SPIs are stored big-endian. */
784 		minspi = ntohl(minspi);
785 		maxspi = ntohl(maxspi);
786 		for (h=0; h<maxspi-minspi+1; h++) {
787 			spi = minspi + net_random()%(maxspi-minspi+1);
788 			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
790 				x->id.spi = htonl(spi);
797 		spin_lock_bh(&xfrm_state_lock);
798 		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
799 		list_add(&x->byspi, xfrm_state_byspi+h);
801 		spin_unlock_bh(&xfrm_state_lock);
805 EXPORT_SYMBOL(xfrm_alloc_spi);
/*
 * Iterate all states matching @proto, invoking @func(x, index, data)
 * for each.  A first pass counts the matches so the callback receives a
 * descending index; the whole walk runs under xfrm_state_lock.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
807 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
811 	struct xfrm_state *x;
815 	spin_lock_bh(&xfrm_state_lock);
/* Pass 1: count matching states. */
816 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
817 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
818 			if (xfrm_id_proto_match(x->id.proto, proto))
/* Pass 2: invoke the callback with a countdown index. */
827 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
828 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
829 			if (!xfrm_id_proto_match(x->id.proto, proto))
831 			err = func(x, --count, data);
837 	spin_unlock_bh(&xfrm_state_lock);
840 EXPORT_SYMBOL(xfrm_state_walk);
/*
 * Send an async replay-state event (XFRM_MSG_NEWAE) to the key
 * managers, rate-limited by replay_maxdiff/replay_maxage: small updates
 * are deferred to the replay timer, and a timeout with no change since
 * the last snapshot only sets XFRM_TIME_DEFER.  On send, the current
 * replay state is snapshotted into preplay and the timer re-armed.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
843 void xfrm_replay_notify(struct xfrm_state *x, int event)
846 	/* we send notify messages in case
847 	 *  1. we updated on of the sequence numbers, and the seqno difference
848 	 *     is at least x->replay_maxdiff, in this case we also update the
849 	 *     timeout of our timer function
850 	 *  2. if x->replay_maxage has elapsed since last update,
851 	 *     and there were changes
853 	 *  The state structure must be locked!
857 	case XFRM_REPLAY_UPDATE:
/* Change too small to report immediately — defer to the timer. */
858 		if (x->replay_maxdiff &&
859 		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
860 		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
861 			if (x->xflags & XFRM_TIME_DEFER)
862 				event = XFRM_REPLAY_TIMEOUT;
869 	case XFRM_REPLAY_TIMEOUT:
/* Nothing changed since the last snapshot: just mark deferral. */
870 		if ((x->replay.seq == x->preplay.seq) &&
871 		    (x->replay.bitmap == x->preplay.bitmap) &&
872 		    (x->replay.oseq == x->preplay.oseq)) {
873 			x->xflags |= XFRM_TIME_DEFER;
/* Snapshot and notify. */
880 	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
881 	c.event = XFRM_MSG_NEWAE;
882 	c.data.aevent = event;
883 	km_state_notify(x, &c);
885 	if (x->replay_maxage &&
886 	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) {
888 		x->xflags &= ~XFRM_TIME_DEFER;
891 EXPORT_SYMBOL(xfrm_replay_notify);
/* Replay timer: on a VALID state, emit a deferred replay notification
 * if aevents are enabled, otherwise remember the deferral via
 * XFRM_TIME_DEFER.  Runs with x->lock held (unlock visible below). */
893 static void xfrm_replay_timer_handler(unsigned long data)
895 	struct xfrm_state *x = (struct xfrm_state*)data;
899 	if (x->km.state == XFRM_STATE_VALID) {
900 		if (xfrm_aevent_is_on())
901 			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
903 			x->xflags |= XFRM_TIME_DEFER;
906 	spin_unlock(&x->lock);
/*
 * Anti-replay acceptance test for inbound sequence number @seq:
 * reject 0, accept anything beyond the current window head, and for
 * older numbers reject those outside the window or already seen in the
 * bitmap (counting a replay_window stat on the out-of-window case).
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
910 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
916 	if (unlikely(seq == 0))
919 	if (likely(seq > x->replay.seq))
922 	diff = x->replay.seq - seq;
923 	if (diff >= x->props.replay_window) {
924 		x->stats.replay_window++;
928 	if (x->replay.bitmap & (1U << diff)) {
934 EXPORT_SYMBOL(xfrm_replay_check);
/*
 * Record @seq as seen: slide the window bitmap forward when @seq
 * advances the head (resetting it when the jump exceeds the window), or
 * set the corresponding bit for an in-window older number; then emit a
 * replay-update aevent if enabled.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
936 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
942 	if (seq > x->replay.seq) {
943 		diff = seq - x->replay.seq;
944 		if (diff < x->props.replay_window)
945 			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
947 			x->replay.bitmap = 1;	/* jumped past the window: only @seq is known */
950 		diff = x->replay.seq - seq;
951 		x->replay.bitmap |= (1U << diff);
954 	if (xfrm_aevent_is_on())
955 		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
957 EXPORT_SYMBOL(xfrm_replay_advance);
/* Registered key managers (pfkey, netlink) and the rwlock guarding the list. */
959 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
960 static DEFINE_RWLOCK(xfrm_km_lock);
/* Broadcast a policy event to every registered key manager that
 * implements notify_policy, under the KM list read lock. */
962 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
966 	read_lock(&xfrm_km_lock);
967 	list_for_each_entry(km, &xfrm_km_list, list)
968 		if (km->notify_policy)
969 			km->notify_policy(xp, dir, c);
970 	read_unlock(&xfrm_km_lock);
/* Broadcast a state event to every registered key manager, under the
 * KM list read lock.  (Callback invocation line elided here.) */
973 void km_state_notify(struct xfrm_state *x, struct km_event *c)
976 	read_lock(&xfrm_km_lock);
977 	list_for_each_entry(km, &xfrm_km_list, list)
980 	read_unlock(&xfrm_km_lock);
983 EXPORT_SYMBOL(km_policy_notify);
984 EXPORT_SYMBOL(km_state_notify);
/* Tell the key managers that @x expired; @hard distinguishes hard expiry
 * from a soft warning, @pid addresses a specific listener (0 = all). */
986 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
992 	c.event = XFRM_MSG_EXPIRE;
993 	km_state_notify(x, &c);
999 EXPORT_SYMBOL(km_state_expired);
1001  * We send to all registered managers regardless of failure
1002  * We are happy with one success
/* Ask every key manager to acquire an SA for (x, t, pol); succeeds if
 * at least one manager accepts the request. */
1004 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1006 	int err = -EINVAL, acqret;
1007 	struct xfrm_mgr *km;
1009 	read_lock(&xfrm_km_lock);
1010 	list_for_each_entry(km, &xfrm_km_list, list) {
1011 		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1015 	read_unlock(&xfrm_km_lock);
1018 EXPORT_SYMBOL(km_query);
/* Report a NAT-T address/port mapping change for @x to every key
 * manager that implements new_mapping. */
1020 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
1023 	struct xfrm_mgr *km;
1025 	read_lock(&xfrm_km_lock);
1026 	list_for_each_entry(km, &xfrm_km_list, list) {
1027 		if (km->new_mapping)
1028 			err = km->new_mapping(x, ipaddr, sport);
1032 	read_unlock(&xfrm_km_lock);
1035 EXPORT_SYMBOL(km_new_mapping);
/* Tell the key managers that policy @pol expired in direction @dir;
 * @hard and @pid as in km_state_expired(). */
1037 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1043 	c.event = XFRM_MSG_POLEXPIRE;
1044 	km_policy_notify(pol, dir, &c);
1049 EXPORT_SYMBOL(km_policy_expired);
/*
 * setsockopt() helper for per-socket IPsec policy: copy the policy blob
 * from userspace (bounded by PAGE_SIZE), let each key manager try to
 * compile it, and install the resulting policy on the socket.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
1051 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1055 	struct xfrm_mgr *km;
1056 	struct xfrm_policy *pol = NULL;
1058 	if (optlen <= 0 || optlen > PAGE_SIZE)
1061 	data = kmalloc(optlen, GFP_KERNEL);
1066 	if (copy_from_user(data, optval, optlen))
/* First manager that can parse this format wins. */
1070 	read_lock(&xfrm_km_lock);
1071 	list_for_each_entry(km, &xfrm_km_list, list) {
1072 		pol = km->compile_policy(sk, optname, data,
1077 	read_unlock(&xfrm_km_lock);
1080 		xfrm_sk_policy_insert(sk, err, pol);
1089 EXPORT_SYMBOL(xfrm_user_policy);
/* Register a key manager by appending it to the global KM list. */
1091 int xfrm_register_km(struct xfrm_mgr *km)
1093 	write_lock_bh(&xfrm_km_lock);
1094 	list_add_tail(&km->list, &xfrm_km_list);
1095 	write_unlock_bh(&xfrm_km_lock);
1098 EXPORT_SYMBOL(xfrm_register_km);
/* Unregister a key manager by unlinking it from the global KM list. */
1100 int xfrm_unregister_km(struct xfrm_mgr *km)
1102 	write_lock_bh(&xfrm_km_lock);
1103 	list_del(&km->list);
1104 	write_unlock_bh(&xfrm_km_lock);
1107 EXPORT_SYMBOL(xfrm_unregister_km);
/*
 * Register per-address-family state operations.  Validates the family
 * index, rejects double registration, hands the shared hash tables to
 * the afinfo, and publishes it in the per-family array.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
1109 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1112 	if (unlikely(afinfo == NULL))
1114 	if (unlikely(afinfo->family >= NPROTO))
1115 		return -EAFNOSUPPORT;
1116 	write_lock_bh(&xfrm_state_afinfo_lock);
1117 	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1120 		afinfo->state_bydst = xfrm_state_bydst;
1121 		afinfo->state_bysrc = xfrm_state_bysrc;
1122 		afinfo->state_byspi = xfrm_state_byspi;
1123 		xfrm_state_afinfo[afinfo->family] = afinfo;
1125 	write_unlock_bh(&xfrm_state_afinfo_lock);
1128 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/*
 * Reverse of xfrm_state_register_afinfo(): verify the slot really holds
 * this afinfo, then clear the published pointer and its table handles.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
1130 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1133 	if (unlikely(afinfo == NULL))
1135 	if (unlikely(afinfo->family >= NPROTO))
1136 		return -EAFNOSUPPORT;
1137 	write_lock_bh(&xfrm_state_afinfo_lock);
1138 	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1139 		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1142 			xfrm_state_afinfo[afinfo->family] = NULL;
1143 			afinfo->state_byspi = NULL;
1144 			afinfo->state_bysrc = NULL;
1145 			afinfo->state_bydst = NULL;
1148 	write_unlock_bh(&xfrm_state_afinfo_lock);
1151 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the per-family ops table.  On success the afinfo read lock is
 * held until xfrm_state_put_afinfo(); on failure it is released here. */
1153 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1155 	struct xfrm_state_afinfo *afinfo;
1156 	if (unlikely(family >= NPROTO))
1158 	read_lock(&xfrm_state_afinfo_lock);
1159 	afinfo = xfrm_state_afinfo[family];
1160 	if (unlikely(!afinfo))
1161 		read_unlock(&xfrm_state_afinfo_lock);
/* Release the read lock taken by a successful xfrm_state_get_afinfo(). */
1165 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1167 	read_unlock(&xfrm_state_afinfo_lock);
1170 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop @x's hold on its inner tunnel state: if we hold the last user
 * reference besides the tunnel's own, delete the tunnel state too. */
1171 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1174 	struct xfrm_state *t = x->tunnel;
1176 	if (atomic_read(&t->tunnel_users) == 2)
1177 		xfrm_state_delete(t);
1178 	atomic_dec(&t->tunnel_users);
1183 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1186  * This function is NOT optimal. For example, with ESP it will give an
1187  * MTU that's usually two bytes short of being optimal. However, it will
1188  * usually give an answer that's a multiple of 4 provided the input is
1189  * also a multiple of 4.
/* Compute the usable payload MTU under this transform: subtract the
 * header, then let the type's get_max_size round to its block size. */
1191 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1195 	res -= x->props.header_len;
1203 	spin_lock_bh(&x->lock);
1204 	if (x->km.state == XFRM_STATE_VALID &&
1205 	    x->type && x->type->get_max_size)
1206 		m = x->type->get_max_size(x, m);
1208 		m += x->props.header_len;
1209 	spin_unlock_bh(&x->lock);
/*
 * Finish constructing a state: run per-family init_flags, resolve and
 * initialize the transform type (proto) and mode modules, and mark the
 * state VALID on success.  Returns -EAFNOSUPPORT / -EPROTONOSUPPORT on
 * unknown family / type.
 * NOTE(review): excerpt — interior lines elided per the embedded numbering.
 */
1219 int xfrm_init_state(struct xfrm_state *x)
1221 	struct xfrm_state_afinfo *afinfo;
1222 	int family = x->props.family;
1225 	err = -EAFNOSUPPORT;
1226 	afinfo = xfrm_state_get_afinfo(family);
1231 	if (afinfo->init_flags)
1232 		err = afinfo->init_flags(x);
1234 	xfrm_state_put_afinfo(afinfo);
1239 	err = -EPROTONOSUPPORT;
1240 	x->type = xfrm_get_type(x->id.proto, family);
1241 	if (x->type == NULL)
1244 	err = x->type->init_state(x);
1248 	x->mode = xfrm_get_mode(x->props.mode, family);
1249 	if (x->mode == NULL)
1252 	x->km.state = XFRM_STATE_VALID;
1258 EXPORT_SYMBOL(xfrm_init_state);
1260 void __init xfrm_state_init(void)
1264 for (i=0; i<XFRM_DST_HSIZE; i++) {
1265 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1266 INIT_LIST_HEAD(&xfrm_state_bysrc[i]);
1267 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1269 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);