author    David S. Miller <davem@davemloft.net>  2015-04-08 12:27:26 -0400
committer David S. Miller <davem@davemloft.net>  2015-04-08 12:27:26 -0400
commit    803afb316eba2df2bba16d984eb90b67c147c922 (patch)
tree      f305fdc53296701b966d07376edda69a3af7ec27
parent    8ae178fb1ce0428d1a22c36c8493bda4807ad3b8 (diff)
parent    e88f7e078e47d4261a22e6f20a574620cbfc7a4b (diff)
download  linux-803afb316eba2df2bba16d984eb90b67c147c922.tar.bz2
Merge branch 'hv_netvsc_linearize'
Vitaly Kuznetsov says:

====================
hv_netvsc: linearize SKBs bigger than MAX_PAGE_BUFFER_COUNT-2 pages

This patch series fixes the same issue which was fixed in Xen with commit
97a6d1bb2b658ac85ed88205ccd1ab809899884d ("xen-netfront: Fix handling
packets on compound pages with skb_linearize").

It is relatively easy to create a packet which is small in size but occupies
more than 30 (MAX_PAGE_BUFFER_COUNT - 2) pages. Here is a kernel-mode
reproducer which tries to send a packet with only 34 bytes of payload (but
spread over 34 pages) and fails:

static int __init sendfb_init(void)
{
	struct socket *sock;
	int i, ret;
	struct sockaddr_in in4_addr = { 0 };
	struct page *pages[17];
	unsigned long flags;

	ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret) {
		pr_err("failed to create socket: %d!\n", ret);
		return ret;
	}

	in4_addr.sin_family = AF_INET;
	/* www.google.com, 74.125.133.99 */
	in4_addr.sin_addr.s_addr = cpu_to_be32(0x4a7d8563);
	in4_addr.sin_port = cpu_to_be16(80);
	ret = sock->ops->connect(sock, (struct sockaddr *)&in4_addr,
				 sizeof(in4_addr), 0);
	if (ret) {
		pr_err("failed to connect: %d!\n", ret);
		return ret;
	}

	/* We can send up to 17 frags */
	flags = MSG_MORE;
	for (i = 0; i < 17; i++) {
		if (i == 16)
			flags = MSG_EOR;
		pages[i] = alloc_pages(GFP_KERNEL | __GFP_COMP, 1);
		if (!pages[i]) {
			pr_err("out of memory!");
			goto free_pages;
		}
		sock->ops->sendpage(sock, pages[i], PAGE_SIZE - 1, 2, flags);
	}

free_pages:
	for (; i > 0; i--)
		__free_pages(pages[i - 1], 1);

	printk("sendfb_init: test done\n");
	return -1;
}
module_init(sendfb_init);

MODULE_LICENSE("GPL");

Trying to load such a module results in multiple
'kernel: hv_netvsc vmbus_15 eth0: Packet too big: 100' messages, as all
retries fail as well. It should also be possible to trigger the issue from
userspace; I expect e.g. NFS under heavy load to get stuck sometimes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
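Why such a tiny payload needs so many slots: the driver charges one page-buffer
slot for every page a fragment touches, and each 2-byte sendpage() above starts
at offset PAGE_SIZE - 1 of a compound page, so it straddles a page boundary and
costs two slots. A rough sketch of that per-fragment accounting (illustration
only, not the driver's actual netvsc_get_slots()):

/* Illustration only: one slot is needed for every page a fragment touches. */
static unsigned int pages_touched(unsigned long offset, unsigned long len)
{
	unsigned long first = offset >> PAGE_SHIFT;
	unsigned long last  = (offset + len - 1) >> PAGE_SHIFT;

	return last - first + 1;	/* 2 bytes at PAGE_SIZE - 1 => 2 pages */
}

With 17 such fragments that is 17 * 2 = 34 data slots; add the 2 slots reserved
for the RNDIS header and the packet needs 36, more than MAX_PAGE_BUFFER_COUNT (32).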
 drivers/net/hyperv/netvsc_drv.c | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index e5fa094e6fe2..448716787e73 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -370,35 +370,49 @@ not_ip:
 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct hv_netvsc_packet *packet;
+	struct hv_netvsc_packet *packet = NULL;
 	int ret;
 	unsigned int num_data_pgs;
 	struct rndis_message *rndis_msg;
 	struct rndis_packet *rndis_pkt;
 	u32 rndis_msg_size;
 	bool isvlan;
+	bool linear = false;
 	struct rndis_per_packet_info *ppi;
 	struct ndis_tcp_ip_checksum_info *csum_info;
 	struct ndis_tcp_lso_info *lso_info;
 	int hdr_offset;
 	u32 net_trans_info;
 	u32 hash;
-	u32 skb_length = skb->len;
-	u32 head_room = skb_headroom(skb);
+	u32 skb_length;
+	u32 head_room;
 	u32 pkt_sz;
 	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 
 
 	/* We will atmost need two pages to describe the rndis
 	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
-	 * of pages in a single packet.
+	 * of pages in a single packet. If skb is scattered around
+	 * more pages we try linearizing it.
 	 */
+
+check_size:
+	skb_length = skb->len;
+	head_room = skb_headroom(skb);
 	num_data_pgs = netvsc_get_slots(skb) + 2;
-	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
-		netdev_err(net, "Packet too big: %u\n", skb->len);
-		dev_kfree_skb(skb);
-		net->stats.tx_dropped++;
-		return NETDEV_TX_OK;
+	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
+		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
+				      num_data_pgs, skb->len);
+		ret = -EFAULT;
+		goto drop;
+	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+		if (skb_linearize(skb)) {
+			net_alert_ratelimited("failed to linearize skb\n");
+			ret = -ENOMEM;
+			goto drop;
+		}
+		linear = true;
+		goto check_size;
 	}
 
 	pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;
@@ -408,9 +422,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 		if (!packet) {
 			/* out of memory, drop packet */
 			netdev_err(net, "unable to alloc hv_netvsc_packet\n");
-			dev_kfree_skb(skb);
-			net->stats.tx_dropped++;
-			return NETDEV_TX_OK;
+			ret = -ENOMEM;
+			goto drop;
 		}
 		packet->part_of_skb = false;
 	} else {
@@ -574,7 +587,7 @@ drop:
 		net->stats.tx_bytes += skb_length;
 		net->stats.tx_packets++;
 	} else {
-		if (!packet->part_of_skb)
+		if (packet && !packet->part_of_skb)
 			kfree(packet);
 		if (ret != -EAGAIN) {
 			dev_kfree_skb_any(skb);
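Stripped of the diff markers, the control flow the first hunk introduces is:
count the slots the skb would need; if the count exceeds the limit and the skb
has not been linearized yet, linearize it and count again; if it still does not
fit, or linearization fails, drop it. A condensed sketch (not the exact driver
code, which also recomputes skb_length and head_room on the second pass):

	bool linear = false;

check_size:
	num_data_pgs = netvsc_get_slots(skb) + 2;	/* +2 pages for the RNDIS header */
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (linear) {				/* already linear, still too big */
			ret = -EFAULT;
			goto drop;
		}
		if (skb_linearize(skb)) {		/* pulls all frags into one buffer */
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;			/* recount with the linear skb */
	}

At most one extra pass is taken, since a linearized skb either fits or is
dropped for good.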