author     Steffen Klassert <steffen.klassert@secunet.com>   2017-12-20 10:41:36 +0100
committer  Steffen Klassert <steffen.klassert@secunet.com>   2017-12-20 10:41:36 +0100
commit     f53c723902d1ac5f0b0a11d7c9dcbff748dde74e (patch)
tree       50841e9a0ff29530ee84e262857333925e09f2ff /net/xfrm
parent     3dca3f38cfb8efb8571040568cac7d0025fa5bb1 (diff)
download   linux-f53c723902d1ac5f0b0a11d7c9dcbff748dde74e.tar.bz2
net: Add asynchronous callbacks for xfrm on layer 2.
This patch implements asynchronous crypto callbacks and a backlog handler that can be used when IPsec is done at layer 2 in the TX path. It also extends the skb validate functions so that we can update the driver transmit return codes based on async crypto operation or to indicate that we queued the packet in a backlog queue.

Joint work with: Aviv Heller <avivh@mellanox.com>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
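Note on the completion flow (not part of the patch text): validate_xmit_xfrm() below tags every packet it hands to the ESP offload code with XFRM_DEV_RESUME and treats -EINPROGRESS from x->type_offload->xmit() as "crypto still running". The asynchronous crypto callback is then responsible for pushing the MAC header back and re-entering the TX path via the new xfrm_dev_resume(). The following is only an illustrative sketch of such a completion handler; the name my_esp_output_done() is hypothetical, and the ESP request bookkeeping (freeing temporary buffers, error statistics, details of the non-offload resume path) is omitted.

/* Illustrative sketch only: a hypothetical async ESP completion handler
 * showing how xfrm_dev_resume() is meant to be called once a crypto
 * request submitted from validate_xmit_xfrm() finishes.
 */
static void my_esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
                if (err) {
                        /* Crypto failed: drop the packet. */
                        kfree_skb(skb);
                        return;
                }

                /* Restore the MAC header that layer 2 expects and hand the
                 * fully transformed skb back to the device TX path.  If the
                 * queue is busy, xfrm_dev_resume() parks the skb on the
                 * per-CPU xfrm_backlog and raises NET_TX_SOFTIRQ.
                 */
                skb_push(skb, skb->data - skb_mac_header(skb));
                xfrm_dev_resume(skb);
                return;
        }

        /* Not a layer-2 offload resume: fall back to the normal xfrm
         * output resume path.
         */
        xfrm_output_resume(skb, err);
}

A real completion handler would additionally have to release its crypto request state before resuming; only the XFRM_DEV_RESUME check and the xfrm_dev_resume() call are specific to this patch.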
Diffstat (limited to 'net/xfrm')
-rw-r--r--   net/xfrm/xfrm_device.c   100
1 file changed, 82 insertions, 18 deletions
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index a5a7a716c465..fc8ab9f71127 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -23,12 +23,13 @@
 #include <linux/notifier.h>
 
 #ifdef CONFIG_XFRM_OFFLOAD
-struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
         int err;
-        __u32 seq;
+        unsigned long flags;
         struct xfrm_state *x;
         struct sk_buff *skb2;
+        struct softnet_data *sd;
         netdev_features_t esp_features = features;
         struct xfrm_offload *xo = xfrm_offload(skb);
 
@@ -42,6 +43,16 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
         if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
                 return skb;
 
+        local_irq_save(flags);
+        sd = this_cpu_ptr(&softnet_data);
+        err = !skb_queue_empty(&sd->xfrm_backlog);
+        local_irq_restore(flags);
+
+        if (err) {
+                *again = true;
+                return skb;
+        }
+
         if (skb_is_gso(skb)) {
                 struct net_device *dev = skb->dev;
 
@@ -54,23 +65,26 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
 
                         segs = skb_gso_segment(skb, esp_features);
                         if (IS_ERR(segs)) {
-                                XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                                 kfree_skb(skb);
+                                atomic_long_inc(&dev->tx_dropped);
                                 return NULL;
                         } else {
                                 consume_skb(skb);
                                 skb = segs;
                         }
-                } else {
-                        return skb;
                 }
         }
 
         if (!skb->next) {
                 x->outer_mode->xmit(x, skb);
 
+                xo->flags |= XFRM_DEV_RESUME;
+
                 err = x->type_offload->xmit(x, skb, esp_features);
                 if (err) {
+                        if (err == -EINPROGRESS)
+                                return NULL;
+
                         XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                         kfree_skb(skb);
                         return NULL;
@@ -82,36 +96,37 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
         }
 
         skb2 = skb;
-        seq = xo->seq.low;
 
         do {
                 struct sk_buff *nskb = skb2->next;
+                skb2->next = NULL;
 
                 xo = xfrm_offload(skb2);
-                xo->flags |= XFRM_GSO_SEGMENT;
-                xo->seq.low = seq;
-                xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-                if(!(features & NETIF_F_HW_ESP))
-                        xo->flags |= CRYPTO_FALLBACK;
+                xo->flags |= XFRM_DEV_RESUME;
 
                 x->outer_mode->xmit(x, skb2);
 
                 err = x->type_offload->xmit(x, skb2, esp_features);
-                if (err) {
+                if (!err) {
+                        skb2->next = nskb;
+                } else if (err != -EINPROGRESS) {
                         XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                         skb2->next = nskb;
                         kfree_skb_list(skb2);
                         return NULL;
-                }
+                } else {
+                        if (skb == skb2)
+                                skb = nskb;
+
+                        if (!skb)
+                                return NULL;
 
-                if (!skb_is_gso(skb2))
-                        seq++;
-                else
-                        seq += skb_shinfo(skb2)->gso_segs;
+                        goto skip_push;
+                }
 
                 skb_push(skb2, skb2->data - skb_mac_header(skb2));
 
+skip_push:
                 skb2 = nskb;
         } while (skb2);
 
@@ -207,6 +222,55 @@ ok:
         return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+
+void xfrm_dev_resume(struct sk_buff *skb)
+{
+        struct net_device *dev = skb->dev;
+        int ret = NETDEV_TX_BUSY;
+        struct netdev_queue *txq;
+        struct softnet_data *sd;
+        unsigned long flags;
+
+        rcu_read_lock();
+        txq = netdev_pick_tx(dev, skb, NULL);
+
+        HARD_TX_LOCK(dev, txq, smp_processor_id());
+        if (!netif_xmit_frozen_or_stopped(txq))
+                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+        HARD_TX_UNLOCK(dev, txq);
+
+        if (!dev_xmit_complete(ret)) {
+                local_irq_save(flags);
+                sd = this_cpu_ptr(&softnet_data);
+                skb_queue_tail(&sd->xfrm_backlog, skb);
+                raise_softirq_irqoff(NET_TX_SOFTIRQ);
+                local_irq_restore(flags);
+        }
+        rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_resume);
+
+void xfrm_dev_backlog(struct softnet_data *sd)
+{
+        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
+        struct sk_buff_head list;
+        struct sk_buff *skb;
+
+        if (skb_queue_empty(xfrm_backlog))
+                return;
+
+        __skb_queue_head_init(&list);
+
+        spin_lock(&xfrm_backlog->lock);
+        skb_queue_splice_init(xfrm_backlog, &list);
+        spin_unlock(&xfrm_backlog->lock);
+
+        while (!skb_queue_empty(&list)) {
+                skb = __skb_dequeue(&list);
+                xfrm_dev_resume(skb);
+        }
+
+}
 #endif
 
 static int xfrm_dev_register(struct net_device *dev)
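The callers of validate_xmit_xfrm()'s new bool *again parameter and of xfrm_dev_backlog() belong to the core networking side of this commit and are hidden by the 'net/xfrm' path filter of this view. The following is only a rough, non-authoritative sketch of how the two hooks are intended to fit together; the helper names are hypothetical, and requeueing and qdisc details are omitted.

/* Illustrative sketch only: a hypothetical transmit helper that runs the
 * xfrm validate step and honours the new "again" signal.
 */
static int my_try_xmit(struct sk_buff *skb, struct net_device *dev,
                       struct netdev_queue *txq)
{
        bool again = false;
        int ret = NETDEV_TX_OK;

        /* May return NULL when the packet was dropped or when the ESP
         * crypto request completes asynchronously (-EINPROGRESS); in the
         * async case xfrm_dev_resume() finishes the transmission later.
         */
        skb = validate_xmit_xfrm(skb, netif_skb_features(skb), &again);
        if (!skb)
                return ret;

        /* The per-CPU xfrm_backlog is not empty: signal the caller to
         * requeue so that backlogged packets keep their ordering.
         */
        if (again)
                return NETDEV_TX_BUSY;

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

        /* Requeueing of a not-fully-sent skb is omitted in this sketch. */
        return ret;
}

/* The NET_TX softirq raised by xfrm_dev_resume() then drains the per-CPU
 * backlog, re-running xfrm_dev_resume() for every queued skb.
 */
static void my_net_tx_softirq(void)
{
        struct softnet_data *sd = this_cpu_ptr(&softnet_data);

        xfrm_dev_backlog(sd);
}

Parking not-yet-transmittable packets on a per-CPU backlog and raising NET_TX_SOFTIRQ lets the crypto callback avoid transmitting against a stopped queue, while the *again check keeps newly validated packets from overtaking packets that are already backlogged.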