From 3c7eacfc8a9a4c2bd48e0093c4f43cf69afd5210 Mon Sep 17 00:00:00 2001
From: Jiri Pirko
Date: Fri, 14 Feb 2014 11:42:36 +0100
Subject: ovs: fix dp check in ovs_dp_reset_user_features

This fixes a crash when userspace does "ovs-dpctl add-dp dev" where dev is
an existing non-dp netdevice.

Introduced by:
commit 44da5ae5fbea4686f667dc854e5ea16814e44c59
"openvswitch: Drop user features if old user space attempted to create datapath"

Signed-off-by: Jiri Pirko
Signed-off-by: Jesse Gross
---
 net/openvswitch/datapath.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e9a48baf8551..e42340d3f820 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1174,7 +1174,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in
 	struct datapath *dp;
 
 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
-	if (!dp)
+	if (IS_ERR(dp))
 		return;
 
 	WARN(dp->user_features, "Dropping previously announced user features\n");
-- 
cgit v1.2.3

From 04382a3303c22b0c536fbd0c94c1f012f2b8ed60 Mon Sep 17 00:00:00 2001
From: Jarno Rajahalme
Date: Sat, 15 Feb 2014 17:37:45 -0800
Subject: openvswitch: Read tcp flags only when the transport header is present.

Only the first IP fragment can have a TCP header, check for this.

Signed-off-by: Jarno Rajahalme
Signed-off-by: Jesse Gross
---
 net/openvswitch/flow.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 16f4b46161d4..d71e60fa28cb 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -73,6 +73,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 
 	if ((flow->key.eth.type == htons(ETH_P_IP) ||
 	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
+	    flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
 	    flow->key.ip.proto == IPPROTO_TCP &&
 	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
 		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
-- 
cgit v1.2.3

From 42ee19e2939277a5277c307e517ce2d7ba5f0703 Mon Sep 17 00:00:00 2001
From: Jarno Rajahalme
Date: Sat, 15 Feb 2014 17:42:29 -0800
Subject: openvswitch: Fix race.

ovs_vport_cmd_dump() did rcu_read_lock() only after getting the datapath,
which could have been deleted in between. Resolved by taking rcu_read_lock()
before the get_dp() call.

Signed-off-by: Jarno Rajahalme
Signed-off-by: Pravin B Shelar
---
 net/openvswitch/datapath.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e42340d3f820..8601b320b443 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1762,11 +1762,12 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int bucket = cb->args[0], skip = cb->args[1];
 	int i, j = 0;
 
+	rcu_read_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-	if (!dp)
+	if (!dp) {
+		rcu_read_unlock();
 		return -ENODEV;
-
-	rcu_read_lock();
+	}
 	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
 		struct vport *vport;
-- 
cgit v1.2.3

From 87536a81e1f52409b45333ce8cac415a1218163c Mon Sep 17 00:00:00 2001
From: Nikolay Aleksandrov
Date: Fri, 7 Mar 2014 12:44:18 +0100
Subject: net: af_key: fix sleeping under rcu

There's a kmalloc with GFP_KERNEL in a helper (pfkey_sadb2xfrm_user_sec_ctx)
used in pfkey_compile_policy which is called under rcu_read_lock. Adjust
pfkey_sadb2xfrm_user_sec_ctx to have a gfp argument and adjust the users.

CC: Dave Jones
CC: Steffen Klassert
CC: Fan Du
CC: David S.
Miller Signed-off-by: Nikolay Aleksandrov Signed-off-by: Steffen Klassert --- net/key/af_key.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/net/key/af_key.c b/net/key/af_key.c index 1a04c1329362..1526023f99ed 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -433,12 +433,13 @@ static inline int verify_sec_ctx_len(const void *p) return 0; } -static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx) +static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx, + gfp_t gfp) { struct xfrm_user_sec_ctx *uctx = NULL; int ctx_size = sec_ctx->sadb_x_ctx_len; - uctx = kmalloc((sizeof(*uctx)+ctx_size), GFP_KERNEL); + uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp); if (!uctx) return NULL; @@ -1124,7 +1125,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; if (sec_ctx != NULL) { - struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); + struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); if (!uctx) goto out; @@ -2231,7 +2232,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; if (sec_ctx != NULL) { - struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); + struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); if (!uctx) { err = -ENOBUFS; @@ -2335,7 +2336,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; if (sec_ctx != NULL) { - struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); + struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); if (!uctx) return -ENOMEM; @@ -3239,7 +3240,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, } if ((*dir = verify_sec_ctx_len(p))) goto out; - uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); + uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_ATOMIC); *dir = security_xfrm_policy_alloc(&xp->security, uctx); kfree(uctx); -- cgit v1.2.3 From 52a4c6404f91f2d2c5592ee6365a8418c4565f53 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Fri, 7 Mar 2014 12:44:19 +0100 Subject: selinux: add gfp argument to security_xfrm_policy_alloc and fix callers security_xfrm_policy_alloc can be called in atomic context so the allocation should be done with GFP_ATOMIC. Add an argument to let the callers choose the appropriate way. In order to do so a gfp argument needs to be added to the method xfrm_policy_alloc_security in struct security_operations and to the internal function selinux_xfrm_alloc_user. After that switch to GFP_ATOMIC in the atomic callers and leave GFP_KERNEL as before for the rest. The path that needed the gfp argument addition is: security_xfrm_policy_alloc -> security_ops.xfrm_policy_alloc_security -> all users of xfrm_policy_alloc_security (e.g. selinux_xfrm_policy_alloc) -> selinux_xfrm_alloc_user (here the allocation used to be GFP_KERNEL only) Now adding a gfp argument to selinux_xfrm_alloc_user requires us to also add it to security_context_to_sid which is used inside and prior to this patch did only GFP_KERNEL allocation. So add gfp argument to security_context_to_sid and adjust all of its callers as well. CC: Paul Moore CC: Dave Jones CC: Steffen Klassert CC: Fan Du CC: David S. 
Miller CC: LSM list CC: SELinux list Signed-off-by: Nikolay Aleksandrov Acked-by: Paul Moore Signed-off-by: Steffen Klassert --- include/linux/security.h | 10 +++++++--- net/key/af_key.c | 6 +++--- net/xfrm/xfrm_user.c | 6 +++--- security/capability.c | 3 ++- security/security.c | 6 ++++-- security/selinux/hooks.c | 13 +++++++------ security/selinux/include/security.h | 2 +- security/selinux/include/xfrm.h | 3 ++- security/selinux/selinuxfs.c | 28 ++++++++++++++++++---------- security/selinux/ss/services.c | 6 ++++-- security/selinux/xfrm.c | 14 ++++++++------ 11 files changed, 59 insertions(+), 38 deletions(-) diff --git a/include/linux/security.h b/include/linux/security.h index 5623a7f965b7..2fc42d191f79 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1040,6 +1040,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Allocate a security structure to the xp->security field; the security * field is initialized to NULL when the xfrm_policy is allocated. * Return 0 if operation was successful (memory to allocate, legal context) + * @gfp is to specify the context for the allocation * @xfrm_policy_clone_security: * @old_ctx contains an existing xfrm_sec_ctx. * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. @@ -1683,7 +1684,7 @@ struct security_operations { #ifdef CONFIG_SECURITY_NETWORK_XFRM int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *sec_ctx); + struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); @@ -2859,7 +2860,8 @@ static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) #ifdef CONFIG_SECURITY_NETWORK_XFRM -int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx); +int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); @@ -2877,7 +2879,9 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); #else /* CONFIG_SECURITY_NETWORK_XFRM */ -static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) +static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, + gfp_t gfp) { return 0; } diff --git a/net/key/af_key.c b/net/key/af_key.c index 1526023f99ed..79326978517a 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2239,7 +2239,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ goto out; } - err = security_xfrm_policy_alloc(&xp->security, uctx); + err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_KERNEL); kfree(uctx); if (err) @@ -2341,7 +2341,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa if (!uctx) return -ENOMEM; - err = security_xfrm_policy_alloc(&pol_ctx, uctx); + err = security_xfrm_policy_alloc(&pol_ctx, uctx, GFP_KERNEL); kfree(uctx); if (err) return err; @@ -3241,7 +3241,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, if ((*dir = verify_sec_ctx_len(p))) goto out; uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, 
GFP_ATOMIC); - *dir = security_xfrm_policy_alloc(&xp->security, uctx); + *dir = security_xfrm_policy_alloc(&xp->security, uctx, GFP_ATOMIC); kfree(uctx); if (*dir) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c274179d60a2..2f7ddc3a59b4 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1221,7 +1221,7 @@ static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs return 0; uctx = nla_data(rt); - return security_xfrm_policy_alloc(&pol->security, uctx); + return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL); } static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, @@ -1626,7 +1626,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (rt) { struct xfrm_user_sec_ctx *uctx = nla_data(rt); - err = security_xfrm_policy_alloc(&ctx, uctx); + err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); if (err) return err; } @@ -1928,7 +1928,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (rt) { struct xfrm_user_sec_ctx *uctx = nla_data(rt); - err = security_xfrm_policy_alloc(&ctx, uctx); + err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); if (err) return err; } diff --git a/security/capability.c b/security/capability.c index 8b4f24ae4338..21e2b9cae685 100644 --- a/security/capability.c +++ b/security/capability.c @@ -757,7 +757,8 @@ static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk) #ifdef CONFIG_SECURITY_NETWORK_XFRM static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *sec_ctx) + struct xfrm_user_sec_ctx *sec_ctx, + gfp_t gfp) { return 0; } diff --git a/security/security.c b/security/security.c index 15b6928592ef..919cad93ac82 100644 --- a/security/security.c +++ b/security/security.c @@ -1317,9 +1317,11 @@ void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) #ifdef CONFIG_SECURITY_NETWORK_XFRM -int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) +int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, + gfp_t gfp) { - return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx); + return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp); } EXPORT_SYMBOL(security_xfrm_policy_alloc); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 4b34847208cc..b332e2cc0954 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -668,7 +668,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, if (flags[i] == SBLABEL_MNT) continue; rc = security_context_to_sid(mount_options[i], - strlen(mount_options[i]), &sid); + strlen(mount_options[i]), &sid, GFP_KERNEL); if (rc) { printk(KERN_WARNING "SELinux: security_context_to_sid" "(%s) failed for (dev %s, type %s) errno=%d\n", @@ -2489,7 +2489,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data) if (flags[i] == SBLABEL_MNT) continue; len = strlen(mount_options[i]); - rc = security_context_to_sid(mount_options[i], len, &sid); + rc = security_context_to_sid(mount_options[i], len, &sid, + GFP_KERNEL); if (rc) { printk(KERN_WARNING "SELinux: security_context_to_sid" "(%s) failed for (dev %s, type %s) errno=%d\n", @@ -2893,7 +2894,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name, if (rc) return rc; - rc = security_context_to_sid(value, size, &newsid); + rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL); if (rc == -EINVAL) { if (!capable(CAP_MAC_ADMIN)) { 
struct audit_buffer *ab; @@ -3050,7 +3051,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name, if (!value || !size) return -EACCES; - rc = security_context_to_sid((void *)value, size, &newsid); + rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL); if (rc) return rc; @@ -5529,7 +5530,7 @@ static int selinux_setprocattr(struct task_struct *p, str[size-1] = 0; size--; } - error = security_context_to_sid(value, size, &sid); + error = security_context_to_sid(value, size, &sid, GFP_KERNEL); if (error == -EINVAL && !strcmp(name, "fscreate")) { if (!capable(CAP_MAC_ADMIN)) { struct audit_buffer *ab; @@ -5638,7 +5639,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) { - return security_context_to_sid(secdata, seclen, secid); + return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL); } static void selinux_release_secctx(char *secdata, u32 seclen) diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 8ed8daf7f1ee..ce7852cf526b 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -134,7 +134,7 @@ int security_sid_to_context(u32 sid, char **scontext, int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len); int security_context_to_sid(const char *scontext, u32 scontext_len, - u32 *out_sid); + u32 *out_sid, gfp_t gfp); int security_context_to_sid_default(const char *scontext, u32 scontext_len, u32 *out_sid, u32 def_sid, gfp_t gfp_flags); diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index 48c3cc94c168..9f0584710c85 100644 --- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h @@ -10,7 +10,8 @@ #include int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *uctx); + struct xfrm_user_sec_ctx *uctx, + gfp_t gfp); int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx); diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 5122affe06a8..d60c0ee66387 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -576,7 +576,7 @@ static ssize_t sel_write_context(struct file *file, char *buf, size_t size) if (length) goto out; - length = security_context_to_sid(buf, size, &sid); + length = security_context_to_sid(buf, size, &sid, GFP_KERNEL); if (length) goto out; @@ -731,11 +731,13 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size) if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) goto out; - length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); + length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, + GFP_KERNEL); if (length) goto out; - length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); + length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, + GFP_KERNEL); if (length) goto out; @@ -817,11 +819,13 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size) objname = namebuf; } - length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); + length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, + GFP_KERNEL); if (length) goto out; - length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); + length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, + GFP_KERNEL); if (length) goto out; @@ -878,11 +882,13 @@ 
static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size) if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) goto out; - length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); + length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, + GFP_KERNEL); if (length) goto out; - length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); + length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, + GFP_KERNEL); if (length) goto out; @@ -934,7 +940,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size) if (sscanf(buf, "%s %s", con, user) != 2) goto out; - length = security_context_to_sid(con, strlen(con) + 1, &sid); + length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL); if (length) goto out; @@ -994,11 +1000,13 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size) if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) goto out; - length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); + length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, + GFP_KERNEL); if (length) goto out; - length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); + length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, + GFP_KERNEL); if (length) goto out; diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 5d0144ee8ed6..4bca49414a40 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -1289,16 +1289,18 @@ out: * @scontext: security context * @scontext_len: length in bytes * @sid: security identifier, SID + * @gfp: context for the allocation * * Obtains a SID associated with the security context that * has the string representation specified by @scontext. * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient * memory is available, or 0 on success. */ -int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid) +int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid, + gfp_t gfp) { return security_context_to_sid_core(scontext, scontext_len, - sid, SECSID_NULL, GFP_KERNEL, 0); + sid, SECSID_NULL, gfp, 0); } /** diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 0462cb3ff0a7..98b042630a9e 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -78,7 +78,8 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x) * xfrm_user_sec_ctx context. */ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *uctx) + struct xfrm_user_sec_ctx *uctx, + gfp_t gfp) { int rc; const struct task_security_struct *tsec = current_security(); @@ -94,7 +95,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, if (str_len >= PAGE_SIZE) return -ENOMEM; - ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL); + ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp); if (!ctx) return -ENOMEM; @@ -103,7 +104,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, ctx->ctx_len = str_len; memcpy(ctx->ctx_str, &uctx[1], str_len); ctx->ctx_str[str_len] = '\0'; - rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid); + rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp); if (rc) goto err; @@ -282,9 +283,10 @@ int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid) * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. 
*/ int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *uctx) + struct xfrm_user_sec_ctx *uctx, + gfp_t gfp) { - return selinux_xfrm_alloc_user(ctxp, uctx); + return selinux_xfrm_alloc_user(ctxp, uctx, gfp); } /* @@ -332,7 +334,7 @@ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx) { - return selinux_xfrm_alloc_user(&x->security, uctx); + return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL); } /* -- cgit v1.2.3 From 2dc3a8e0b6e10c77798a4f6f711ce66334eda1c4 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Thu, 6 Mar 2014 15:51:08 -0500 Subject: brcmfmac: fix skb leak in brcmf_sdio_txpkt_prep_sg error path. Commit 1eb4301867 (brcmfmac: fix txglomming scatter-gather packet transfers) added an allocation of an skb via brcmu_pkt_buf_get_skb() but forgot to free it on one of the error paths. Acked-by: Arend van Spriel Signed-off-by: Dave Jones Signed-off-by: John W. Linville --- drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 119ee6eaf1c3..ddaa9efd053d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -1948,8 +1948,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus, if (pkt_pad == NULL) return -ENOMEM; ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); - if (unlikely(ret < 0)) + if (unlikely(ret < 0)) { + kfree_skb(pkt_pad); return ret; + } memcpy(pkt_pad->data, pkt->data + pkt->len - tail_chop, tail_chop); -- cgit v1.2.3 From 105ff411c96c52c67261efbe245f0947d39ebce7 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 9 Mar 2014 09:51:16 +0100 Subject: ath9k_hw: fix unreachable code in baseband hang detection code The commit "ath9k: reduce baseband hang detection false positive rate" added a delay in the loop checking the baseband state, however it was unreachable due to previous 'continue' statements. Reported-by: Dan Carpenter Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville --- drivers/net/wireless/ath/ath9k/hw.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 303ce27964c1..9078a6c5a74e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1548,6 +1548,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) if (reg != last_val) return true; + udelay(1); last_val = reg; if ((reg & 0x7E7FFFEF) == 0x00702400) continue; @@ -1560,8 +1561,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) default: return true; } - - udelay(1); } while (count-- > 0); return false; -- cgit v1.2.3 From 5998be879719384af2014b79697eed6e38ee2706 Mon Sep 17 00:00:00 2001 From: Helmut Schaa Date: Wed, 12 Mar 2014 10:37:55 +0100 Subject: ath9k: Fix sequence number assignment for non-data frames Since commit 558ff225de80ac95b132d3a115ddadcd64498b4f (ath9k: fix ps-poll responses under a-mpdu sessions) non-data frames would have gotten a sequence number from a TIDs sequence counter instead of using the global sequence counter. This can lead to instable connections. To fix this only select the correct TID if we are processing a data frame. Furthermore, prevent non-data frames to get a sequence number from a TID sequence counter by adding a check to ath_tx_setup_buffer. 
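Both hunks below add the same ieee80211_is_data_present() guard; condensed, the
resulting logic is (this simply mirrors the ath_tx_start() hunk of the patch,
nothing beyond it):

	if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
		tid = ath_get_skb_tid(sc, txctl->an, skb);

so only frames that actually carry a data payload can pick up a sequence number
from a per-TID counter; everything else keeps using the global sequence counter.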
Cc: Felix Fietkau Signed-off-by: Helmut Schaa Acked-by: Felix Fietkau Signed-off-by: John W. Linville --- drivers/net/wireless/ath/ath9k/xmit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index f042a18c8495..55897d508a76 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2063,7 +2063,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, ATH_TXBUF_RESET(bf); - if (tid) { + if (tid && ieee80211_is_data_present(hdr->frame_control)) { fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; seqno = tid->seq_next; hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); @@ -2186,7 +2186,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, txq->stopped = true; } - if (txctl->an) + if (txctl->an && ieee80211_is_data_present(hdr->frame_control)) tid = ath_get_skb_tid(sc, txctl->an, skb); if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { -- cgit v1.2.3 From 584221918925f0d3bbc8ff6de9cfdb703c06a29c Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 12 Mar 2014 15:14:04 +0100 Subject: Revert "rt2x00: rt2800lib: Update BBP register initialization for RT53xx" This reverts commit eac40d9631a7db43570df859fa8a9922e9623607. It cause random connection drops on RT5390 PCI adapters. On Mediatek there is different driver version available for RT53xx chip based on bus type (2.5.0.3 for PCI and 2.6.1.3 for USB). Hence possibly we should set registers differently based on bus type. But is also possible that new driver (i.e. 2.6.1.3) was not verified on RT53xx USB. Until we figure out how to initialize registers properly for RT53xx just revert commit eac40d9631a7db43570df859fa8a9922e9623607 since it cause regression. Signed-off-by: Stanislaw Gruszka Signed-off-by: John W. Linville --- drivers/net/wireless/rt2x00/rt2800lib.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 7f8b5d156c8c..41d4a8167dc3 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -5460,14 +5460,15 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 68, 0x0b); - rt2800_bbp_write(rt2x00dev, 69, 0x0d); - rt2800_bbp_write(rt2x00dev, 70, 0x06); + rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x13); rt2800_bbp_write(rt2x00dev, 75, 0x46); rt2800_bbp_write(rt2x00dev, 76, 0x28); rt2800_bbp_write(rt2x00dev, 77, 0x59); + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + rt2800_bbp_write(rt2x00dev, 79, 0x13); rt2800_bbp_write(rt2x00dev, 80, 0x05); rt2800_bbp_write(rt2x00dev, 81, 0x33); @@ -5510,7 +5511,6 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 134, 0xd0); rt2800_bbp_write(rt2x00dev, 135, 0xf6); - rt2800_bbp_write(rt2x00dev, 148, 0x84); } rt2800_disable_unused_dac_adc(rt2x00dev); -- cgit v1.2.3 From fcad3e6b576e78fce9ffca124a15de86b64453a0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 13 Mar 2014 10:11:45 -0700 Subject: MAINTAINERS: Add linux.nics@intel.com to INTEL ETHERNET DRIVERS If this is added to the driver files, then maybe it's appropriate to add to MAINTAINERS as well. Signed-off-by: Joe Perches Acked-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index b3fdb0f004ba..12a6b2973492 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4545,6 +4545,7 @@ M: Greg Rose M: Alex Duyck M: John Ronciak M: Mitch Williams +M: Linux NICS L: e1000-devel@lists.sourceforge.net W: http://www.intel.com/support/feedback.htm W: http://e1000.sourceforge.net/ -- cgit v1.2.3 From 32fc3fd41ae44a41ffb8fd5ec4f4764822104655 Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Fri, 14 Mar 2014 10:07:44 +0100 Subject: net: phy: fix uninitalized ethtool_wolinfo in phy_suspend Callers of phy_ethtool_get_wol are supposed to provide a properly cleared struct ethtool_wolinfo. Therefore, fix phy_suspend to clear it before passing it to phy_ethtool_get_wol. Signed-off-by: Sebastian Hesselbarth Reviewed-by: Ben Hutchings Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 4b970f7624c0..2f6989b1e0dc 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -683,10 +683,9 @@ EXPORT_SYMBOL(phy_detach); int phy_suspend(struct phy_device *phydev) { struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); - struct ethtool_wolinfo wol; + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; /* If the device has WOL enabled, we cannot suspend the PHY */ - wol.cmd = ETHTOOL_GWOL; phy_ethtool_get_wol(phydev, &wol); if (wol.wolopts) return -EBUSY; -- cgit v1.2.3 From 8d7f1fbf083e19688619e6ca25c95434a2c30537 Mon Sep 17 00:00:00 2001 From: Peter Senna Tschudin Date: Sun, 16 Mar 2014 00:30:52 +0100 Subject: ATHEROS-ALX: Use dma_set_mask_and_coherent and fix a bug 1. For the 64 bits dma mask use dma_set_mask_and_coherent instead of dma_set_mask and dma_set_coherent_mask. 2. For the 32 bits dma mask dma_set_coherent_mask is only called if dma_set_mask fails, which is unusual. Assuming this as a bug, fixes it by replacing calls to dma_set_mask and dma_set_coherent_mask by a call to dma_set_mask_and_coherent. Signed-off-by: Peter Senna Tschudin Tested-by: Jonas Hahnfeld Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/alx/main.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 2e45f6ec1bf0..380d24922049 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * shared register for the high 32 bits, so only a single, aligned, * 4 GB physical address range can be used for descriptors. 
*/ - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA config, aborting\n"); - goto out_pci_disable; - } + dev_err(&pdev->dev, "No usable DMA config, aborting\n"); + goto out_pci_disable; } } -- cgit v1.2.3 From b085f311e85b1d6f75d610097c2f20583b776fda Mon Sep 17 00:00:00 2001 From: Benedikt Spranger Date: Sun, 16 Mar 2014 16:36:29 +0100 Subject: net: cpsw: do not register cpts twice commit f280e89a (drivers: net: cpsw: fix for cpsw crash when build as modules) moved cpts_register()/cpts_unregister() to ndo_open()/ndo_stop(), but failed to remove cpts_register in cpsw_probe() which leads to a double registration and the following debug object splat. [ 18.991902] ODEBUG: init active (active state 0) object type: timer_list hint: delayed_work_timer_fn+0x0/0x2c [ 19.082249] [] (init_timer_key) from [] (cpts_register+0x1f0/0x2c4) [ 19.090642] [] (cpts_register) from [] (cpsw_ndo_open+0x780/0x81c) [ 19.098948] [] (cpsw_ndo_open) from [] (__dev_open+0xb4/0x118) Signed-off-by: Benedikt Spranger Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index ffd4d12acf6d..7d6d8ec676c8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2229,10 +2229,6 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - if (cpts_register(&pdev->dev, priv->cpts, - data->cpts_clock_mult, data->cpts_clock_shift)) - dev_err(priv->dev, "error registering cpts device\n"); - cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", &ss_res->start, ndev->irq); -- cgit v1.2.3 From d1958f8c2f0d6bfaa115f4dce1d5d19be5c6b090 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Sun, 16 Mar 2014 18:44:21 +0100 Subject: isdn/capi: Make Middleware depend on CAPI2.0 The Kconfig symbol ISDN_CAPI_MIDDLEWARE is only used in capi.c. Setting it without setting ISDN_CAPI_CAPI20 is therefor useless. Make it depend on ISDN_CAPI_CAPI20 and put its entry after ISDN_CAPI_CAPI20's entry. Signed-off-by: Paul Bolle Signed-off-by: Tilman Schmidt Signed-off-by: David S. Miller --- drivers/isdn/capi/Kconfig | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig index f04686580040..9816c51eb5c2 100644 --- a/drivers/isdn/capi/Kconfig +++ b/drivers/isdn/capi/Kconfig @@ -16,9 +16,17 @@ config CAPI_TRACE This will increase the size of the kernelcapi module by 20 KB. If unsure, say Y. +config ISDN_CAPI_CAPI20 + tristate "CAPI2.0 /dev/capi support" + help + This option will provide the CAPI 2.0 interface to userspace + applications via /dev/capi20. Applications should use the + standardized libcapi20 to access this functionality. You should say + Y/M here. + config ISDN_CAPI_MIDDLEWARE bool "CAPI2.0 Middleware support" - depends on TTY + depends on ISDN_CAPI_CAPI20 && TTY help This option will enhance the capabilities of the /dev/capi20 interface. It will provide a means of moving a data connection, @@ -26,14 +34,6 @@ config ISDN_CAPI_MIDDLEWARE device. 
 	  If you want to use pppd with pppdcapiplugin to dial up to
 	  your ISP, say Y here.
 
-config ISDN_CAPI_CAPI20
-	tristate "CAPI2.0 /dev/capi support"
-	help
-	  This option will provide the CAPI 2.0 interface to userspace
-	  applications via /dev/capi20. Applications should use the
-	  standardized libcapi20 to access this functionality. You should say
-	  Y/M here.
-
 config ISDN_CAPI_CAPIDRV
 	tristate "CAPI2.0 capidrv interface support"
 	depends on ISDN_I4L
-- 
cgit v1.2.3

From e367c2d03dba4c9bcafad24688fadb79dd95b218 Mon Sep 17 00:00:00 2001
From: lucien
Date: Mon, 17 Mar 2014 12:51:01 +0800
Subject: ipv6: ip6_append_data_mtu does not handle the mtu of the second fragment properly

In ip6_append_data_mtu(), when the xfrm mode is not tunnel (such as
transport), the ipsec header needs to be added in the first fragment, so
the mtu will decrease to reserve space for it. When the second fragment
comes, the mtu should be turned back, as commit
0c1833797a5a6ec23ea9261d979aa18078720b74 said. However, commit
75a493e60ac4bbe2e977e7129d6d8cbb0dd236be uses *mtu = min(*mtu, ...) to
change the mtu, which leads to the new mtu always being equal to the
first fragment's, so it can never be turned back.

When I test with ping6 -c1 -s5000 $ip (mtu=1280):

...frag (0|1232) ESP(spi=0x00002000,seq=0xb), length 1232
...frag (1232|1216)
...frag (2448|1216)
...frag (3664|1216)
...frag (4880|164)

which should be:

...frag (0|1232) ESP(spi=0x00001000,seq=0x1), length 1232
...frag (1232|1232)
...frag (2464|1232)
...frag (3696|1232)
...frag (4928|116)

So delete the min() when changing back the mtu.

Signed-off-by: Xin Long
Fixes: 75a493e60ac4bb ("ipv6: ip6_append_data_mtu did not care about pmtudisc and frag_size")
Acked-by: Hannes Frederic Sowa
Signed-off-by: David S. Miller
---
 net/ipv6/ip6_output.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 16f91a2e7888..64d6073731d3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1101,21 +1101,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
 			unsigned int fragheaderlen,
 			struct sk_buff *skb,
 			struct rt6_info *rt,
-			bool pmtuprobe)
+			unsigned int orig_mtu)
 {
 	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
 		if (skb == NULL) {
 			/* first fragment, reserve header_len */
-			*mtu = *mtu - rt->dst.header_len;
+			*mtu = orig_mtu - rt->dst.header_len;
 
 		} else {
 			/*
 			 * this fragment is not first, the headers
 			 * space is regarded as data space.
 			 */
-			*mtu = min(*mtu, pmtuprobe ?
- rt->dst.dev->mtu : - dst_mtu(rt->dst.path)); + *mtu = orig_mtu; } *maxfraglen = ((*mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); @@ -1132,7 +1130,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, struct ipv6_pinfo *np = inet6_sk(sk); struct inet_cork *cork; struct sk_buff *skb, *skb_prev = NULL; - unsigned int maxfraglen, fragheaderlen, mtu; + unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; int exthdrlen; int dst_exthdrlen; int hh_len; @@ -1214,6 +1212,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, dst_exthdrlen = 0; mtu = cork->fragsize; } + orig_mtu = mtu; hh_len = LL_RESERVED_SPACE(rt->dst.dev); @@ -1311,8 +1310,7 @@ alloc_new_skb: if (skb == NULL || skb_prev == NULL) ip6_append_data_mtu(&mtu, &maxfraglen, fragheaderlen, skb, rt, - np->pmtudisc >= - IPV6_PMTUDISC_PROBE); + orig_mtu); skb_prev = skb; -- cgit v1.2.3 From ff0992e9036e9810e7cd45234fa32ca1e79750e2 Mon Sep 17 00:00:00 2001 From: Bjørn Mork Date: Mon, 17 Mar 2014 16:25:18 +0100 Subject: net: cdc_ncm: fix control message ordering MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a context modified revert of commit 6a9612e2cb22 ("net: cdc_ncm: remove ncm_parm field") which introduced a NCM specification violation, causing setup errors for some devices. These errors resulted in the device and host disagreeing about shared settings, with complete failure to communicate as the end result. The NCM specification require that many of the NCM specific control reuests are sent only while the NCM Data Interface is in alternate setting 0. Reverting the commit ensures that we follow this requirement. Fixes: 6a9612e2cb22 ("net: cdc_ncm: remove ncm_parm field") Reported-and-tested-by: Pasi Kärkkäinen Reported-by: Thomas Schäfer Signed-off-by: Bjørn Mork Signed-off-by: David S. 
Miller --- drivers/net/usb/cdc_ncm.c | 48 ++++++++++++++++++++++----------------------- include/linux/usb/cdc_ncm.h | 1 + 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index dbff290ed0e4..d350d2795e10 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -68,7 +68,6 @@ static struct usb_driver cdc_ncm_driver; static int cdc_ncm_setup(struct usbnet *dev) { struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - struct usb_cdc_ncm_ntb_parameters ncm_parm; u32 val; u8 flags; u8 iface_no; @@ -82,22 +81,22 @@ static int cdc_ncm_setup(struct usbnet *dev) err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, USB_TYPE_CLASS | USB_DIR_IN |USB_RECIP_INTERFACE, - 0, iface_no, &ncm_parm, - sizeof(ncm_parm)); + 0, iface_no, &ctx->ncm_parm, + sizeof(ctx->ncm_parm)); if (err < 0) { dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); return err; /* GET_NTB_PARAMETERS is required */ } /* read correct set of parameters according to device mode */ - ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize); - ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize); - ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder); - ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor); - ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment); + ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); + ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); + ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); + ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); + ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); /* devices prior to NCM Errata shall set this field to zero */ - ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams); - ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported); + ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); + ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); /* there are some minor differences in NCM and MBIM defaults */ if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { @@ -146,7 +145,7 @@ static int cdc_ncm_setup(struct usbnet *dev) } /* inform device about NTB input size changes */ - if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) { + if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, @@ -162,14 +161,6 @@ static int cdc_ncm_setup(struct usbnet *dev) dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", CDC_NCM_NTB_MAX_SIZE_TX); ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; - - /* Adding a pad byte here simplifies the handling in - * cdc_ncm_fill_tx_frame, by making tx_max always - * represent the real skb max size. 
- */ - if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) - ctx->tx_max++; - } /* @@ -439,6 +430,10 @@ advance: goto error2; } + /* initialize data interface */ + if (cdc_ncm_setup(dev)) + goto error2; + /* configure data interface */ temp = usb_set_interface(dev->udev, iface_no, data_altsetting); if (temp) { @@ -453,12 +448,6 @@ advance: goto error2; } - /* initialize data interface */ - if (cdc_ncm_setup(dev)) { - dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n"); - goto error2; - } - usb_set_intfdata(ctx->data, dev); usb_set_intfdata(ctx->control, dev); @@ -475,6 +464,15 @@ advance: dev->hard_mtu = ctx->tx_max; dev->rx_urb_size = ctx->rx_max; + /* cdc_ncm_setup will override dwNtbOutMaxSize if it is + * outside the sane range. Adding a pad byte here if necessary + * simplifies the handling in cdc_ncm_fill_tx_frame, making + * tx_max always represent the real skb max size. + */ + if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) && + ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) + ctx->tx_max++; + return 0; error2: diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index c3fa80745996..2c14d9cdd57a 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -88,6 +88,7 @@ #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) struct cdc_ncm_ctx { + struct usb_cdc_ncm_ntb_parameters ncm_parm; struct hrtimer tx_timer; struct tasklet_struct bh; -- cgit v1.2.3 From f7bd12d09ed6e4093a56dbbfbe8411cc52a738d1 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 17 Mar 2014 19:19:06 -0800 Subject: cnic: Use proper ulp_ops for per device operations. For per device operations, cnic needs to dereference the RCU protected cp->ulp_ops instead of the global cnic_ulp_tbl. In 2 locations, cnic_send_nlmsg() and cnic_copy_ulp_stats(), it was referencing the global table. If the device has been unregistered and these functions are still being called (very unlikely scenarios), it could lead to NULL pointer dereference. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/cnic.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index fcf9105a5476..2cb248269c8f 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, while (retry < 3) { rc = 0; rcu_read_lock(); - ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); + ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); if (ulp_ops) rc = ulp_ops->iscsi_nl_send_msg( cp->ulp_handle[CNIC_ULP_ISCSI], @@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) int rc; mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl_prot(ulp_type); + ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type], + lockdep_is_held(&cnic_lock)); if (ulp_ops && ulp_ops->cnic_get_stats) rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); else -- cgit v1.2.3 From be1fefc21433f6202fcd76bdc7916e557fe80b9a Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 17 Mar 2014 19:19:07 -0800 Subject: cnic,bnx2i,bnx2fc: Fix inconsistent use of page size The bnx2/bnx2x rings are made up of linked pages. However there is an upper limit on the page size as some the page size settings are 16-bit in the hardware/firmware interface. 
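The cap is implemented by a small set of helper macros added to cnic_if.h
(the hunk is quoted in full further below); for reference:

	/* Use CPU native page size up to 16K for cnic ring sizes. */
	#if (PAGE_SHIFT > 14)
	#define CNIC_PAGE_BITS	14
	#else
	#define CNIC_PAGE_BITS	PAGE_SHIFT
	#endif
	#define CNIC_PAGE_SIZE	(1 << (CNIC_PAGE_BITS))
	#define CNIC_PAGE_ALIGN(addr)	ALIGN(addr, CNIC_PAGE_SIZE)
	#define CNIC_PAGE_MASK	(~((CNIC_PAGE_SIZE) - 1))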
In the current code, some parts use BNX2_PAGE_SIZE which has a 16K upper limit and some parts use PAGE_SIZE. On archs with >= 64K PAGE_SIZE, it generates some compile warnings. Define a new CNIC_PAGE_SZIE which has an upper limit of 16K and use it consistently in all relevant parts. Signed-off-by: Michael Chan Signed-off-by: Eddie Wai Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/cnic.c | 104 ++++++++++++++++---------------- drivers/net/ethernet/broadcom/cnic_if.h | 10 +++ drivers/scsi/bnx2fc/bnx2fc_io.c | 16 ++--- drivers/scsi/bnx2fc/bnx2fc_tgt.c | 38 ++++++------ drivers/scsi/bnx2i/bnx2i_hwi.c | 52 ++++++++-------- drivers/scsi/bnx2i/bnx2i_iscsi.c | 23 +++---- 6 files changed, 129 insertions(+), 114 deletions(-) diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 2cb248269c8f..ea37ea209234 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) for (i = 0; i < dma->num_pages; i++) { if (dma->pg_arr[i]) { - dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE, + dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE, dma->pg_arr[i], dma->pg_map_arr[i]); dma->pg_arr[i] = NULL; } @@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, for (i = 0; i < pages; i++) { dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, - BNX2_PAGE_SIZE, + CNIC_PAGE_SIZE, &dma->pg_map_arr[i], GFP_ATOMIC); if (dma->pg_arr[i] == NULL) @@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, if (!use_pg_tbl) return 0; - dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) & - ~(BNX2_PAGE_SIZE - 1); + dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) & + ~(CNIC_PAGE_SIZE - 1); dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, &dma->pgtbl_map, GFP_ATOMIC); if (dma->pgtbl == NULL) @@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev) if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { int i, k, arr_size; - cp->ctx_blk_size = BNX2_PAGE_SIZE; - cp->cids_per_blk = BNX2_PAGE_SIZE / 128; + cp->ctx_blk_size = CNIC_PAGE_SIZE; + cp->cids_per_blk = CNIC_PAGE_SIZE / 128; arr_size = BNX2_MAX_CID / cp->cids_per_blk * sizeof(struct cnic_ctx); cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); @@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev) for (i = 0; i < cp->ctx_blks; i++) { cp->ctx_arr[i].ctx = dma_alloc_coherent(&dev->pcidev->dev, - BNX2_PAGE_SIZE, + CNIC_PAGE_SIZE, &cp->ctx_arr[i].mapping, GFP_KERNEL); if (cp->ctx_arr[i].ctx == NULL) @@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) if (udev->l2_ring) return 0; - udev->l2_ring_size = pages * BNX2_PAGE_SIZE; + udev->l2_ring_size = pages * CNIC_PAGE_SIZE; udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, &udev->l2_ring_map, GFP_KERNEL | __GFP_COMP); @@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) return -ENOMEM; udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; - udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); + udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size); udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, &udev->l2_buf_map, GFP_KERNEL | __GFP_COMP); @@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev) uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); uinfo->mem[1].addr = (unsigned 
long) cp->status_blk.gen & - PAGE_MASK; + CNIC_PAGE_MASK; if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; else @@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev) uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & - PAGE_MASK; + CNIC_PAGE_MASK; uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); uinfo->name = "bnx2x_cnic"; @@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; - pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / - PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / + CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); if (ret) return -ENOMEM; - n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; + n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; for (i = 0, j = 0; i < cp->max_cid_space; i++) { long off = CNIC_KWQ16_DATA_SIZE * (i % n); @@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) goto error; } - pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); if (ret) goto error; @@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * BNX2X_ISCSI_R2TQE_SIZE; cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; - pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; - hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); + pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; + hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); cp->num_cqs = req1->num_cqs; if (!dev->max_iscsi_conn) @@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), req1->rq_num_wqes); CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); + CNIC_PAGE_SIZE); CNIC_WR8(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), req1->num_tasks_per_conn); @@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), req1->rq_buffer_size); CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); + CNIC_PAGE_SIZE); CNIC_WR8(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), req1->num_tasks_per_conn); @@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) /* init Xstorm RAM */ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); + CNIC_PAGE_SIZE); CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), req1->num_tasks_per_conn); @@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) /* init Cstorm RAM 
*/ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); + CNIC_PAGE_SIZE); CNIC_WR8(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), req1->num_tasks_per_conn); @@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) } ctx->cid = cid; - pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); if (ret) goto error; - pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); if (ret) goto error; - pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; + pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); if (ret) goto error; @@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; /* TSTORM requires the base address of RQ DB & not PTE */ ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = - req2->rq_page_table_addr_lo & PAGE_MASK; + req2->rq_page_table_addr_lo & CNIC_PAGE_MASK; ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = req2->rq_page_table_addr_hi; ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; @@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], /* CSTORM and USTORM initialization is different, CSTORM requires * CQ DB base & not PTE addr */ ictx->cstorm_st_context.cq_db_base.lo = - req1->cq_page_table_addr_lo & PAGE_MASK; + req1->cq_page_table_addr_lo & CNIC_PAGE_MASK; ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; @@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp) u16 hw_cons, sw_cons; struct cnic_uio_dev *udev = cp->udev; union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) - (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); + (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); u32 cmd; int comp = 0; @@ -4385,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; u32 val; - memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE); + memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE); CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); @@ -4629,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); - rxbd = udev->l2_ring + BNX2_PAGE_SIZE; + rxbd = udev->l2_ring + CNIC_PAGE_SIZE; for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) { dma_addr_t buf_map; int n = (i % cp->l2_rx_ring_size) + 1; @@ -4640,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; } - val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; + val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); rxbd->rx_bd_haddr_hi = val; - val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; + val = (u64) (ring_map + CNIC_PAGE_SIZE) & 
0xffffffff; cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); rxbd->rx_bd_haddr_lo = val; @@ -4710,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) val = CNIC_RD(dev, BNX2_MQ_CONFIG); val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; - if (BNX2_PAGE_BITS > 12) + if (CNIC_PAGE_BITS > 12) val |= (12 - 8) << 4; else - val |= (BNX2_PAGE_BITS - 8) << 4; + val |= (CNIC_PAGE_BITS - 8) << 4; CNIC_WR(dev, BNX2_MQ_CONFIG, val); @@ -4743,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) /* Initialize the kernel work queue context. */ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | - (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; + (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); - val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; + val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); - val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; + val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); @@ -4769,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) /* Initialize the kernel complete queue context. */ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | - (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; + (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); - val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; + val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); - val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; + val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); @@ -4919,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, u32 cli = cp->ethdev->iscsi_l2_client_id; u32 val; - memset(txbd, 0, BNX2_PAGE_SIZE); + memset(txbd, 0, CNIC_PAGE_SIZE); buf_map = udev->l2_buf_map; for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { @@ -4979,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_uio_dev *udev = cp->udev; struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + - BNX2_PAGE_SIZE); + CNIC_PAGE_SIZE); struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) - (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); + (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; int i; u32 cli = cp->ethdev->iscsi_l2_client_id; @@ -5005,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); } - val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; + val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; rxbd->addr_hi = cpu_to_le32(val); data->rx.bd_page_base.hi = cpu_to_le32(val); - val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; + val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; rxbd->addr_lo = cpu_to_le32(val); data->rx.bd_page_base.lo = cpu_to_le32(val); rxcqe += BNX2X_MAX_RCQ_DESC_CNT; - val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32; + val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32; rxcqe->addr_hi = cpu_to_le32(val); data->rx.cqe_page_base.hi = cpu_to_le32(val); - val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 
0xffffffff; + val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff; rxcqe->addr_lo = cpu_to_le32(val); data->rx.cqe_page_base.lo = cpu_to_le32(val); @@ -5266,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev) msleep(10); } clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); - rx_ring = udev->l2_ring + BNX2_PAGE_SIZE; - memset(rx_ring, 0, BNX2_PAGE_SIZE); + rx_ring = udev->l2_ring + CNIC_PAGE_SIZE; + memset(rx_ring, 0, CNIC_PAGE_SIZE); } static int cnic_register_netdev(struct cnic_dev *dev) diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 8cf6b1926069..40bb5630d79b 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -24,6 +24,16 @@ #define MAX_CNIC_ULP_TYPE_EXT 3 #define MAX_CNIC_ULP_TYPE 4 +/* Use CPU native page size up to 16K for cnic ring sizes. */ +#if (PAGE_SHIFT > 14) +#define CNIC_PAGE_BITS 14 +#else +#define CNIC_PAGE_BITS PAGE_SHIFT +#endif +#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS)) +#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE) +#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1)) + struct kwqe { u32 kwqe_op_flag; diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index ed880891cb7c..e9279a8c1e1c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) mp_req->mp_resp_bd = NULL; } if (mp_req->req_buf) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, mp_req->req_buf, mp_req->req_buf_dma); mp_req->req_buf = NULL; } if (mp_req->resp_buf) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, mp_req->resp_buf, mp_req->resp_buf_dma); mp_req->resp_buf = NULL; @@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) mp_req->req_len = sizeof(struct fcp_cmnd); io_req->data_xfer_len = mp_req->req_len; - mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &mp_req->req_buf_dma, GFP_ATOMIC); if (!mp_req->req_buf) { @@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) return FAILED; } - mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_ATOMIC); if (!mp_req->resp_buf) { @@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) bnx2fc_free_mp_resc(io_req); return FAILED; } - memset(mp_req->req_buf, 0, PAGE_SIZE); - memset(mp_req->resp_buf, 0, PAGE_SIZE); + memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); + memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE); /* Allocate and map mp_req_bd and mp_resp_bd */ sz = sizeof(struct fcoe_bd_ctx); @@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) mp_req_bd = mp_req->mp_req_bd; mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); - mp_req_bd->buf_len = PAGE_SIZE; + mp_req_bd->buf_len = CNIC_PAGE_SIZE; mp_req_bd->flags = 0; /* @@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) addr = mp_req->resp_buf_dma; mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); - mp_resp_bd->buf_len = PAGE_SIZE; + mp_resp_bd->buf_len = CNIC_PAGE_SIZE; mp_resp_bd->flags = 0; return SUCCESS; diff --git 
a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 4d93177dfb53..d9bae5672273 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map SQ */ tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; - tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, &tgt->sq_dma, GFP_KERNEL); @@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map CQ */ tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; - tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, &tgt->cq_dma, GFP_KERNEL); @@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map RQ and RQ PBL */ tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; - tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, &tgt->rq_dma, GFP_KERNEL); @@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, } memset(tgt->rq, 0, tgt->rq_mem_size); - tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *); - tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); + tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, &tgt->rq_pbl_dma, GFP_KERNEL); @@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, } memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); - num_pages = tgt->rq_mem_size / PAGE_SIZE; + num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; page = tgt->rq_dma; pbl = (u32 *)tgt->rq_pbl; @@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, pbl++; *pbl = (u32)((u64)page >> 32); pbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } /* Allocate and map XFERQ */ tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; - tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) & - PAGE_MASK; + tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, &tgt->xferq_dma, GFP_KERNEL); @@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map CONFQ & CONFQ PBL */ tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; - tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) & - PAGE_MASK; + tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, &tgt->confq_dma, GFP_KERNEL); @@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, memset(tgt->confq, 0, tgt->confq_mem_size); tgt->confq_pbl_size = - (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *); + (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); tgt->confq_pbl_size = - (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + 
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_pbl_size, @@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, } memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); - num_pages = tgt->confq_mem_size / PAGE_SIZE; + num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; page = tgt->confq_dma; pbl = (u32 *)tgt->confq_pbl; @@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, pbl++; *pbl = (u32)((u64)page >> 32); pbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } /* Allocate and map ConnDB */ @@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map LCQ */ tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; - tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) & - PAGE_MASK; + tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, &tgt->lcq_dma, GFP_KERNEL); diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index e4cf23df4b4f..b87a1933f880 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) * yield integral num of page buffers */ /* adjust SQ */ - num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; + num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; if (hba->max_sqes < num_elements_per_pg) hba->max_sqes = num_elements_per_pg; else if (hba->max_sqes % num_elements_per_pg) @@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) ~(num_elements_per_pg - 1); /* adjust CQ */ - num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; + num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE; if (hba->max_cqes < num_elements_per_pg) hba->max_cqes = num_elements_per_pg; else if (hba->max_cqes % num_elements_per_pg) @@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) ~(num_elements_per_pg - 1); /* adjust RQ */ - num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; + num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE; if (hba->max_rqes < num_elements_per_pg) hba->max_rqes = num_elements_per_pg; else if (hba->max_rqes % num_elements_per_pg) @@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) /* SQ page table */ memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); - num_pages = ep->qp.sq_mem_size / PAGE_SIZE; + num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; page = ep->qp.sq_phys; if (cnic_dev_10g) @@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) ((u64) page >> 32); ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } else { /* PTE is written in big endian format for * 5706/5708/5709 devices */ @@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) page; ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } } /* RQ page table */ memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); - num_pages = ep->qp.rq_mem_size / PAGE_SIZE; + num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; page = ep->qp.rq_phys; if (cnic_dev_10g) @@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) ((u64) page >> 32); ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } else { /* PTE is written in big endian format for * 5706/5708/5709 devices */ @@ -1009,13 +1009,13 @@ static void 
setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) page; ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } } /* CQ page table */ memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); - num_pages = ep->qp.cq_mem_size / PAGE_SIZE; + num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; page = ep->qp.cq_phys; if (cnic_dev_10g) @@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) ((u64) page >> 32); ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } else { /* PTE is written in big endian format for * 5706/5708/5709 devices */ @@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) page; ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } } } @@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /* Allocate page table memory for SQ which is page aligned */ ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; ep->qp.sq_mem_size = - (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.sq_pgtbl_size = - (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); + (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); ep->qp.sq_pgtbl_size = - (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.sq_pgtbl_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, @@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /* Allocate page table memory for CQ which is page aligned */ ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; ep->qp.cq_mem_size = - (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.cq_pgtbl_size = - (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); + (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); ep->qp.cq_pgtbl_size = - (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.cq_pgtbl_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, @@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /* Allocate page table memory for RQ which is page aligned */ ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; ep->qp.rq_mem_size = - (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.rq_pgtbl_size = - (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); + (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); ep->qp.rq_pgtbl_size = - (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.rq_pgtbl_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, @@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) bnx2i_adjust_qp_size(hba); iscsi_init.flags = - ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; + (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; if (en_tcp_dack) iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; iscsi_init.reserved0 = 0; @@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); iscsi_init.num_ccells_per_conn = hba->num_ccell; iscsi_init.num_tasks_per_conn = hba->max_sqes; - iscsi_init.sq_wqes_per_page = PAGE_SIZE / 
BNX2I_SQ_WQE_SIZE; + iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; iscsi_init.sq_num_wqes = hba->max_sqes; iscsi_init.cq_log_wqes_per_page = - (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); + (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE); iscsi_init.cq_num_wqes = hba->max_cqes; iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + - (PAGE_SIZE - 1)) / PAGE_SIZE; + (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + - (PAGE_SIZE - 1)) / PAGE_SIZE; + (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; iscsi_init.rq_num_wqes = hba->max_rqes; diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 854dad7d5b03..c8b0aff5bbd4 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) struct iscsi_bd *mp_bdt; u64 addr; - hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &hba->mp_bd_dma, GFP_KERNEL); if (!hba->mp_bd_tbl) { printk(KERN_ERR "unable to allocate Middle Path BDT\n"); @@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) goto out; } - hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, + CNIC_PAGE_SIZE, &hba->dummy_buf_dma, GFP_KERNEL); if (!hba->dummy_buffer) { printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->mp_bd_tbl, hba->mp_bd_dma); hba->mp_bd_tbl = NULL; rc = -1; @@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) addr = (unsigned long) hba->dummy_buf_dma; mp_bdt->buffer_addr_lo = addr & 0xffffffff; mp_bdt->buffer_addr_hi = addr >> 32; - mp_bdt->buffer_length = PAGE_SIZE; + mp_bdt->buffer_length = CNIC_PAGE_SIZE; mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | ISCSI_BD_FIRST_IN_BD_CHAIN; out: @@ -565,12 +566,12 @@ out: static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) { if (hba->mp_bd_tbl) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->mp_bd_tbl, hba->mp_bd_dma); hba->mp_bd_tbl = NULL; } if (hba->dummy_buffer) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->dummy_buffer, hba->dummy_buf_dma); hba->dummy_buffer = NULL; } @@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn) { if (bnx2i_conn->gen_pdu.resp_bd_tbl) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.resp_bd_tbl, bnx2i_conn->gen_pdu.resp_bd_dma); bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; } if (bnx2i_conn->gen_pdu.req_bd_tbl) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; @@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; bnx2i_conn->gen_pdu.req_bd_tbl = - dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); if 
(bnx2i_conn->gen_pdu.req_bd_tbl == NULL) goto login_req_bd_tbl_failure; bnx2i_conn->gen_pdu.resp_bd_tbl = - dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &bnx2i_conn->gen_pdu.resp_bd_dma, GFP_KERNEL); if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) @@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, return 0; login_resp_bd_tbl_failure: - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; -- cgit v1.2.3 From c3661283f947ff5023b717240d069e9be765b0a9 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 17 Mar 2014 19:19:08 -0800 Subject: cnic: Update version to 2.5.20 and copyright year. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/cnic.c | 2 +- drivers/net/ethernet/broadcom/cnic.h | 2 +- drivers/net/ethernet/broadcom/cnic_defs.h | 2 +- drivers/net/ethernet/broadcom/cnic_if.h | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index ea37ea209234..09f3fefcbf9c 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1,6 +1,6 @@ /* cnic.c: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 0d6b13f854d9..d535ae4228b4 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -1,6 +1,6 @@ /* cnic.h: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index 95a8e4b11c9f..dcbca6997e8f 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h @@ -1,7 +1,7 @@ /* cnic.c: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 40bb5630d79b..5f4d5573a73d 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,6 +1,6 @@ /* cnic_if.h: Broadcom CNIC core network driver. 
* - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,8 +14,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.19" -#define CNIC_MODULE_RELDATE "December 19, 2013" +#define CNIC_MODULE_VERSION "2.5.20" +#define CNIC_MODULE_RELDATE "March 14, 2014" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 -- cgit v1.2.3 From 7346135dcd3f9b57f30a5512094848c678d7143e Mon Sep 17 00:00:00 2001 From: David Stevens Date: Tue, 18 Mar 2014 12:32:29 -0400 Subject: vxlan: fix potential NULL dereference in arp_reduce() This patch fixes a NULL pointer dereference in the event of an skb allocation failure in arp_reduce(). Signed-Off-By: David L Stevens Acked-by: Cong Wang Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b0f705c2378f..a7eb3f28db6c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1318,6 +1318,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) neigh_release(n); + if (reply == NULL) + goto out; + skb_reset_mac_header(reply); __skb_pull(reply, skb_network_offset(reply)); reply->ip_summed = CHECKSUM_UNNECESSARY; -- cgit v1.2.3 From 3e3d35402140f15a626f24afafb0d7fc3b2e5aa2 Mon Sep 17 00:00:00 2001 From: Peter Senna Tschudin Date: Tue, 18 Mar 2014 17:11:24 +0100 Subject: ATHEROS-ATL1E: Convert iounmap to pci_iounmap Use pci_iounmap instead of iounmap when the virtual mapping was done with pci_iomap. A simplified version of the semantic patch that finds this issue is as follows: (http://coccinelle.lip6.fr/) // @r@ expression addr; @@ addr = pci_iomap(...) @rr@ expression r.addr; @@ * iounmap(addr) // Signed-off-by: Peter Senna Tschudin Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index d5c2d3e912e5..422aab27ea1b 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -2436,7 +2436,7 @@ err_reset: err_register: err_sw_init: err_eeprom: - iounmap(adapter->hw.hw_addr); + pci_iounmap(pdev, adapter->hw.hw_addr); err_init_netdev: err_ioremap: free_netdev(netdev); @@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev) unregister_netdev(netdev); atl1e_free_ring_resources(adapter); atl1e_force_ps(&adapter->hw); - iounmap(adapter->hw.hw_addr); + pci_iounmap(pdev, adapter->hw.hw_addr); pci_release_regions(pdev); free_netdev(netdev); pci_disable_device(pdev); -- cgit v1.2.3 From ae996154f72fdd116291f69ec6c856be9c77042a Mon Sep 17 00:00:00 2001 From: Roger Luethi Date: Tue, 18 Mar 2014 18:14:01 +0100 Subject: via-rhine: Disable device in error path Currently, via-rhine fails to call pci_disable_device() for errors in rhine_init_one(). Reported-by: Huqiu Liu Signed-off-by: Roger Luethi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/via/via-rhine.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index ef312bc6b865..6ac20a6738f4 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) { dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card!?\n"); - goto err_out; + goto err_out_pci_disable; } /* sanity check */ @@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) (pci_resource_len(pdev, 1) < io_size)) { rc = -EIO; dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); - goto err_out; + goto err_out_pci_disable; } pioaddr = pci_resource_start(pdev, 0); @@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev = alloc_etherdev(sizeof(struct rhine_private)); if (!dev) { rc = -ENOMEM; - goto err_out; + goto err_out_pci_disable; } SET_NETDEV_DEV(dev, &pdev->dev); @@ -1084,6 +1084,8 @@ err_out_free_res: pci_release_regions(pdev); err_out_free_netdev: free_netdev(dev); +err_out_pci_disable: + pci_disable_device(pdev); err_out: return rc; } -- cgit v1.2.3 From f9b8c4c8baded129535d82d74df8e87a7a369f54 Mon Sep 17 00:00:00 2001 From: Ben Pfaff Date: Thu, 20 Mar 2014 10:45:21 -0700 Subject: openvswitch: Correctly report flow used times for first 5 minutes after boot. The kernel starts out its "jiffies" timer as 5 minutes below zero, as shown in include/linux/jiffies.h: /* * Have the 32 bit jiffies value wrap 5 minutes after boot * so jiffies wrap bugs show up earlier. */ #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) The loop in ovs_flow_stats_get() starts out with 'used' set to 0, then takes any "later" time. This means that for the first five minutes after boot, flows will always be reported as never used, since 0 is greater than any time already seen. Signed-off-by: Ben Pfaff Acked-by: Pravin B Shelar Signed-off-by: Jesse Gross --- net/openvswitch/flow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index d71e60fa28cb..dda451f4429c 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -92,7 +92,7 @@ static void stats_read(struct flow_stats *stats, unsigned long *used, __be16 *tcp_flags) { spin_lock(&stats->lock); - if (time_after(stats->used, *used)) + if (!*used || time_after(stats->used, *used)) *used = stats->used; *tcp_flags |= stats->tcp_flags; ovs_stats->n_packets += stats->packet_count; -- cgit v1.2.3 From 88050049e7111f074e8887ba5a4cefc9f8fa8d41 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 19 Mar 2014 21:54:20 -0700 Subject: netlink: fix setsockopt in mmap examples in documentation The documentation for how to use netlink mmap interface is incorrect. The calls to setsockopt() require an additional argument. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- Documentation/networking/netlink_mmap.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt index b26122973525..c6af4bac5aa8 100644 --- a/Documentation/networking/netlink_mmap.txt +++ b/Documentation/networking/netlink_mmap.txt @@ -226,9 +226,9 @@ Ring setup: void *rx_ring, *tx_ring; /* Configure ring parameters */ - if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0) + if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0) exit(1); - if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0) + if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0) exit(1) /* Calculate size of each individual ring */ -- cgit v1.2.3 From 632623153196bf183a69686ed9c07eee98ff1bf8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 19 Mar 2014 21:02:21 -0700 Subject: tcp: syncookies: do not use getnstimeofday() While it is true that getnstimeofday() uses about 40 cycles if TSC is available, it can use 1600 cycles if hpet is the clocksource. Switch to get_jiffies_64(), as this is more than enough, and go back to 60 seconds periods. Fixes: 8c27bd75f04f ("tcp: syncookies: reduce cookie lifetime to 128 seconds") Signed-off-by: Eric Dumazet Cc: Florian Westphal Acked-by: Florian Westphal Signed-off-by: David S. Miller --- include/net/tcp.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 8c4dd63134d4..743accec6c76 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, #ifdef CONFIG_SYN_COOKIES #include -/* Syncookies use a monotonic timer which increments every 64 seconds. +/* Syncookies use a monotonic timer which increments every 60 seconds. * This counter is used both as a hash input and partially encoded into * the cookie value. A cookie is only validated further if the delta * between the current counter value and the encoded one is less than this, - * i.e. a sent cookie is valid only at most for 128 seconds (or less if + * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if * the counter advances immediately after a cookie is generated). */ #define MAX_SYNCOOKIE_AGE 2 static inline u32 tcp_cookie_time(void) { - struct timespec now; - getnstimeofday(&now); - return now.tv_sec >> 6; /* 64 seconds granularity */ + u64 val = get_jiffies_64(); + + do_div(val, 60 * HZ); + return val; } u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, -- cgit v1.2.3 From 1c104a6bebf3c16b6248408b84f91d09ac8a26b6 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Wed, 19 Mar 2014 17:47:49 +0100 Subject: rtnetlink: fix fdb notification flags Commit 3ff661c38c84 ("net: rtnetlink notify events for FDB NTF_SELF adds and deletes") reuses the function nlmsg_populate_fdb_fill() to notify fdb events. But this function was used only for dump and thus was always setting the flag NLM_F_MULTI, which is wrong in case of a single notification. Libraries like libnl will wait forever for NLMSG_DONE. CC: Thomas Graf Signed-off-by: Nicolas Dichtel Acked-by: Thomas Graf Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1a0dac2ef9ad..120eecc0f5a4 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2121,12 +2121,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo); static int nlmsg_populate_fdb_fill(struct sk_buff *skb, struct net_device *dev, u8 *addr, u32 pid, u32 seq, - int type, unsigned int flags) + int type, unsigned int flags, + int nlflags) { struct nlmsghdr *nlh; struct ndmsg *ndm; - nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI); + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); if (!nlh) return -EMSGSIZE; @@ -2164,7 +2165,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type) if (!skb) goto errout; - err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF); + err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0); if (err < 0) { kfree_skb(skb); goto errout; @@ -2389,7 +2390,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb, err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, portid, seq, - RTM_NEWNEIGH, NTF_SELF); + RTM_NEWNEIGH, NTF_SELF, + NLM_F_MULTI); if (err < 0) return err; skip: -- cgit v1.2.3 From 65886f439ab0fdc2dff20d1fa87afb98c6717472 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Wed, 19 Mar 2014 17:47:50 +0100 Subject: ipmr: fix mfc notification flags Commit 8cd3ac9f9b7b ("ipmr: advertise new mfc entries via rtnl") reuses the function ipmr_fill_mroute() to notify mfc events. But this function was used only for dump and thus was always setting the flag NLM_F_MULTI, which is wrong in case of a single notification. Libraries like libnl will wait forever for NLMSG_DONE. CC: Thomas Graf Signed-off-by: Nicolas Dichtel Acked-by: Thomas Graf Signed-off-by: David S. 
Miller --- net/ipv4/ipmr.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b9b3472975ba..28863570dd60 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2255,13 +2255,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, } static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, - u32 portid, u32 seq, struct mfc_cache *c, int cmd) + u32 portid, u32 seq, struct mfc_cache *c, int cmd, + int flags) { struct nlmsghdr *nlh; struct rtmsg *rtm; int err; - nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); + nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); if (nlh == NULL) return -EMSGSIZE; @@ -2329,7 +2330,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, if (skb == NULL) goto errout; - err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); + err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); if (err < 0) goto errout; @@ -2368,7 +2369,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) if (ipmr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - mfc, RTM_NEWROUTE) < 0) + mfc, RTM_NEWROUTE, + NLM_F_MULTI) < 0) goto done; next_entry: e++; @@ -2382,7 +2384,8 @@ next_entry: if (ipmr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - mfc, RTM_NEWROUTE) < 0) { + mfc, RTM_NEWROUTE, + NLM_F_MULTI) < 0) { spin_unlock_bh(&mfc_unres_lock); goto done; } -- cgit v1.2.3 From f518338b16038beeb73e155e60d0f70beb9379f4 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Wed, 19 Mar 2014 17:47:51 +0100 Subject: ip6mr: fix mfc notification flags Commit 812e44dd1829 ("ip6mr: advertise new mfc entries via rtnl") reuses the function ip6mr_fill_mroute() to notify mfc events. But this function was used only for dump and thus was always setting the flag NLM_F_MULTI, which is wrong in case of a single notification. Libraries like libnl will wait forever for NLMSG_DONE. CC: Thomas Graf Signed-off-by: Nicolas Dichtel Acked-by: Thomas Graf Signed-off-by: David S. 
Miller --- net/ipv6/ip6mr.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0eb4038a4d63..8737400af0a0 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net, } static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, - u32 portid, u32 seq, struct mfc6_cache *c, int cmd) + u32 portid, u32 seq, struct mfc6_cache *c, int cmd, + int flags) { struct nlmsghdr *nlh; struct rtmsg *rtm; int err; - nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); + nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); if (nlh == NULL) return -EMSGSIZE; @@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc, if (skb == NULL) goto errout; - err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); + err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); if (err < 0) goto errout; @@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) if (ip6mr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - mfc, RTM_NEWROUTE) < 0) + mfc, RTM_NEWROUTE, + NLM_F_MULTI) < 0) goto done; next_entry: e++; @@ -2476,7 +2478,8 @@ next_entry: if (ip6mr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - mfc, RTM_NEWROUTE) < 0) { + mfc, RTM_NEWROUTE, + NLM_F_MULTI) < 0) { spin_unlock_bh(&mfc_unres_lock); goto done; } -- cgit v1.2.3 From ebf4ad955d3e26d4d2a33709624fc7b5b9d3b969 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Fri, 21 Mar 2014 01:52:48 -0500 Subject: net: micrel : ks8851-ml: add vdd-supply support Few platforms use external regulator to keep the ethernet MAC supplied. So, request and enable the regulator for driver functionality. Fixes: 66fda75f47dc (regulator: core: Replace direct ops->disable usage) Reported-by: Russell King Suggested-by: Markus Pargmann Signed-off-by: Nishanth Menon Signed-off-by: David S. Miller --- .../devicetree/bindings/net/micrel-ks8851.txt | 1 + drivers/net/ethernet/micrel/ks8851.c | 30 +++++++++++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt index 11ace3c3d805..4fc392763611 100644 --- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt +++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt @@ -7,3 +7,4 @@ Required properties: Optional properties: - local-mac-address : Ethernet mac address to use +- vdd-supply: supply for Ethernet mac diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 727b546a9eb8..e0c92e0e5e1d 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -83,6 +84,7 @@ union ks8851_tx_hdr { * @rc_rxqcr: Cached copy of KS_RXQCR. * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. + * @vdd_reg: Optional regulator supplying the chip * * The @lock ensures that the chip is protected when certain operations are * in progress. 
When the read or write packet transfer is in progress, most @@ -130,6 +132,7 @@ struct ks8851_net { struct spi_transfer spi_xfer2[2]; struct eeprom_93cx6 eeprom; + struct regulator *vdd_reg; }; static int msg_enable; @@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi) ks->spidev = spi; ks->tx_space = 6144; + ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd"); + if (IS_ERR(ks->vdd_reg)) { + ret = PTR_ERR(ks->vdd_reg); + if (ret == -EPROBE_DEFER) + goto err_reg; + } else { + ret = regulator_enable(ks->vdd_reg); + if (ret) { + dev_err(&spi->dev, "regulator enable fail: %d\n", + ret); + goto err_reg_en; + } + } + + mutex_init(&ks->lock); spin_lock_init(&ks->statelock); @@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi) err_netdev: free_irq(ndev->irq, ks); -err_id: err_irq: +err_id: + if (!IS_ERR(ks->vdd_reg)) + regulator_disable(ks->vdd_reg); +err_reg_en: + if (!IS_ERR(ks->vdd_reg)) + regulator_put(ks->vdd_reg); +err_reg: free_netdev(ndev); return ret; } @@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi) unregister_netdev(priv->netdev); free_irq(spi->irq, priv); + if (!IS_ERR(priv->vdd_reg)) { + regulator_disable(priv->vdd_reg); + regulator_put(priv->vdd_reg); + } free_netdev(priv->netdev); return 0; -- cgit v1.2.3 From c27f0872a3448c46e561e226b5b97f77187b06d2 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Fri, 21 Mar 2014 20:53:57 +0800 Subject: netpoll: fix the skb check in pkt_is_ns Neighbor Solicitation is ipv6 protocol, so we should check skb->protocol with ETH_P_IPV6 Signed-off-by: Li RongQing Cc: WANG Cong Signed-off-by: David S. Miller --- net/core/netpoll.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index a664f7829a6d..df9e6b1a9759 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -742,7 +742,7 @@ static bool pkt_is_ns(struct sk_buff *skb) struct nd_msg *msg; struct ipv6hdr *hdr; - if (skb->protocol != htons(ETH_P_ARP)) + if (skb->protocol != htons(ETH_P_IPV6)) return false; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) return false; -- cgit v1.2.3 From 33b7107f59a61236d94ecd6b45e20283cd5abcc8 Mon Sep 17 00:00:00 2001 From: Christian Riesch Date: Mon, 24 Mar 2014 13:46:26 +0100 Subject: net: davinci_emac: Replace devm_request_irq with request_irq In commit 6892b41d9701283085b655c6086fb57a5d63fa47 Author: Lad, Prabhakar Date: Tue Jun 25 21:24:51 2013 +0530 net: davinci: emac: Convert to devm_* api the call of request_irq is replaced by devm_request_irq and the call of free_irq is removed. But since interrupts are requested in emac_dev_open, doing ifconfig up/down on the board requests the interrupts again each time, causing devm_request_irq to fail. The interface is dead until the device is rebooted. This patch reverts said commit partially: It changes the driver back to use request_irq instead of devm_request_irq, puts free_irq back in place, but keeps the remaining changes of the original patch. Reported-by: Jon Ringle Signed-off-by: Christian Riesch Cc: Lad, Prabhakar Cc: Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/davinci_emac.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index cd9b164a0434..251430450005 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1532,7 +1532,7 @@ static int emac_dev_open(struct net_device *ndev) struct device *emac_dev = &ndev->dev; u32 cnt; struct resource *res; - int ret; + int q, m, ret; int i = 0; int k = 0; struct emac_priv *priv = netdev_priv(ndev); @@ -1567,8 +1567,7 @@ static int emac_dev_open(struct net_device *ndev) while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { for (i = res->start; i <= res->end; i++) { - if (devm_request_irq(&priv->pdev->dev, i, emac_irq, - 0, ndev->name, ndev)) + if (request_irq(i, emac_irq, 0, ndev->name, ndev)) goto rollback; } k++; @@ -1641,7 +1640,15 @@ static int emac_dev_open(struct net_device *ndev) rollback: - dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed"); + dev_err(emac_dev, "DaVinci EMAC: request_irq() failed"); + + for (q = k; k >= 0; k--) { + for (m = i; m >= res->start; m--) + free_irq(m, ndev); + res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1); + m = res->end; + } + ret = -EBUSY; err: pm_runtime_put(&priv->pdev->dev); @@ -1659,6 +1666,9 @@ err: */ static int emac_dev_stop(struct net_device *ndev) { + struct resource *res; + int i = 0; + int irq_num; struct emac_priv *priv = netdev_priv(ndev); struct device *emac_dev = &ndev->dev; @@ -1674,6 +1684,13 @@ static int emac_dev_stop(struct net_device *ndev) if (priv->phydev) phy_disconnect(priv->phydev); + /* Free IRQ */ + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) + free_irq(irq_num, priv->ndev); + i++; + } + if (netif_msg_drv(priv)) dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); -- cgit v1.2.3 From cd11cf505318ff24e42f35145f9cdf8596fa1958 Mon Sep 17 00:00:00 2001 From: Christian Riesch Date: Mon, 24 Mar 2014 13:46:27 +0100 Subject: net: davinci_emac: Fix rollback of emac_dev_open() If an error occurs during the initialization in emac_dev_open() (the driver's ndo_open function), interrupts, DMA descriptors etc. must be freed. The current rollback code is buggy in several ways. 1) Freeing the interrupts. The current code will not free all interrupts that were requested by the driver. Furthermore, the code tries to do a platform_get_resource(priv->pdev, IORESOURCE_IRQ, -1) in its last iteration. This patch fixes these bugs. 2) Wrong order of err: and rollback: labels. If the setup of the PHY in the code fails, the interrupts that have been requested before are not freed: request irq if requesting irqs fails, goto rollback setup phy if phy setup fails, goto err return 0 rollback: free irqs err: This patch brings the code into the correct order. 3) The code calls napi_enable() and emac_int_enable(), but does not undo both in case of an error. This patch adds calls of emac_int_disable() and napi_disable() to the rollback code. 4) RX DMA descriptors are not freed in case of an error: Right before requesting the irqs, the function creates DMA descriptors for the RX channel. These RX descriptors are never freed when we jump to either rollback or err. This patch adds code for freeing the DMA descriptors in the case of an initialization error. 
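The unwind order is the key point here: only the IRQs that were actually requested may be freed, and anything enabled after them has to be torn down first, in reverse order. A rough sketch of that goto-unwind shape, using made-up step names rather than the real davinci_emac helpers:

	/* Illustrative unwind ladder; every identifier here is a placeholder. */
	static int example_open(void)
	{
		int i, ret;

		ret = enable_clocks();			/* placeholder step 1 */
		if (ret)
			goto err_out;

		for (i = 0; i < NUM_IRQS_USED; i++) {
			ret = grab_irq(i);		/* placeholder step 2 */
			if (ret)
				goto err_free_irqs;
		}

		ret = start_dma();			/* placeholder step 3 */
		if (ret)
			goto err_free_irqs;

		return 0;

	err_free_irqs:
		while (--i >= 0)			/* frees only the IRQs already requested */
			release_irq(i);
		disable_clocks();
	err_out:
		return ret;
	}
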
This required a modification of cpdma_ctrl_stop() in davinci_cpdma.c: We must be able to call this function to free the DMA descriptors while the DMA channels are in IDLE state (before cpdma_ctlr_start() was called). Tested on a custom board with the Texas Instruments AM1808. Signed-off-by: Christian Riesch Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 4 +-- drivers/net/ethernet/ti/davinci_emac.c | 44 ++++++++++++++++++++++----------- 2 files changed, 31 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 364d0c7952c0..88ef27067bf2 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) int i; spin_lock_irqsave(&ctlr->lock, flags); - if (ctlr->state != CPDMA_STATE_ACTIVE) { + if (ctlr->state == CPDMA_STATE_TEARDOWN) { spin_unlock_irqrestore(&ctlr->lock, flags); return -EINVAL; } @@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) unsigned timeout; spin_lock_irqsave(&chan->lock, flags); - if (chan->state != CPDMA_STATE_ACTIVE) { + if (chan->state == CPDMA_STATE_TEARDOWN) { spin_unlock_irqrestore(&chan->lock, flags); return -EINVAL; } diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 251430450005..8f0e69ce07ca 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1533,8 +1533,8 @@ static int emac_dev_open(struct net_device *ndev) u32 cnt; struct resource *res; int q, m, ret; + int res_num = 0, irq_num = 0; int i = 0; - int k = 0; struct emac_priv *priv = netdev_priv(ndev); pm_runtime_get(&priv->pdev->dev); @@ -1564,14 +1564,24 @@ static int emac_dev_open(struct net_device *ndev) } /* Request IRQ */ + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, + res_num))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) { + dev_err(emac_dev, "Request IRQ %d\n", irq_num); + if (request_irq(irq_num, emac_irq, 0, ndev->name, + ndev)) { + dev_err(emac_dev, + "DaVinci EMAC: request_irq() failed\n"); + ret = -EBUSY; - while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { - for (i = res->start; i <= res->end; i++) { - if (request_irq(i, emac_irq, 0, ndev->name, ndev)) goto rollback; + } } - k++; + res_num++; } + /* prepare counters for rollback in case of an error */ + res_num--; + irq_num--; /* Start/Enable EMAC hardware */ emac_hw_enable(priv); @@ -1638,19 +1648,23 @@ static int emac_dev_open(struct net_device *ndev) return 0; -rollback: - - dev_err(emac_dev, "DaVinci EMAC: request_irq() failed"); +err: + emac_int_disable(priv); + napi_disable(&priv->napi); - for (q = k; k >= 0; k--) { - for (m = i; m >= res->start; m--) +rollback: + for (q = res_num; q >= 0; q--) { + res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q); + /* at the first iteration, irq_num is already set to the + * right value + */ + if (q != res_num) + irq_num = res->end; + + for (m = irq_num; m >= res->start; m--) free_irq(m, ndev); - res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1); - m = res->end; } - - ret = -EBUSY; -err: + cpdma_ctlr_stop(priv->dma); pm_runtime_put(&priv->pdev->dev); return ret; } -- cgit v1.2.3 From 4b29dba9c085a4fb79058fb1c45a2f6257ca3dfa Mon Sep 17 00:00:00 2001 From: David Stevens Date: Mon, 24 Mar 2014 10:39:58 -0400 Subject: vxlan: fix nonfunctional neigh_reduce() The VXLAN neigh_reduce() code is completely non-functional 
since check-in. Specific errors: 1) The original code drops all packets with a multicast destination address, even though neighbor solicitations are sent to the solicited-node address, a multicast address. The code after this check was never run. 2) The neighbor table lookup used the IPv6 header destination, which is the solicited node address, rather than the target address from the neighbor solicitation. So neighbor lookups would always fail if it got this far. Also for L3MISSes. 3) The code calls ndisc_send_na(), which does a send on the tunnel device. The context for neigh_reduce() is the transmit path, vxlan_xmit(), where the host or a bridge-attached neighbor is trying to transmit a neighbor solicitation. To respond to it, the tunnel endpoint needs to do a *receive* of the appropriate neighbor advertisement. Doing a send, would only try to send the advertisement, encapsulated, to the remote destinations in the fdb -- hosts that definitely did not do the corresponding solicitation. 4) The code uses the tunnel endpoint IPv6 forwarding flag to determine the isrouter flag in the advertisement. This has nothing to do with whether or not the target is a router, and generally won't be set since the tunnel endpoint is bridging, not routing, traffic. The patch below creates a proxy neighbor advertisement to respond to neighbor solicitions as intended, providing proper IPv6 support for neighbor reduction. Signed-off-by: David L Stevens Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 113 insertions(+), 14 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a7eb3f28db6c..1236812c7be6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1342,15 +1342,103 @@ out: } #if IS_ENABLED(CONFIG_IPV6) + +static struct sk_buff *vxlan_na_create(struct sk_buff *request, + struct neighbour *n, bool isrouter) +{ + struct net_device *dev = request->dev; + struct sk_buff *reply; + struct nd_msg *ns, *na; + struct ipv6hdr *pip6; + u8 *daddr; + int na_olen = 8; /* opt hdr + ETH_ALEN for target */ + int ns_olen; + int i, len; + + if (dev == NULL) + return NULL; + + len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + + sizeof(*na) + na_olen + dev->needed_tailroom; + reply = alloc_skb(len, GFP_ATOMIC); + if (reply == NULL) + return NULL; + + reply->protocol = htons(ETH_P_IPV6); + reply->dev = dev; + skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); + skb_push(reply, sizeof(struct ethhdr)); + skb_set_mac_header(reply, 0); + + ns = (struct nd_msg *)skb_transport_header(request); + + daddr = eth_hdr(request)->h_source; + ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); + for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { + if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { + daddr = ns->opt + i + sizeof(struct nd_opt_hdr); + break; + } + } + + /* Ethernet header */ + ether_addr_copy(eth_hdr(reply)->h_dest, daddr); + ether_addr_copy(eth_hdr(reply)->h_source, n->ha); + eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); + reply->protocol = htons(ETH_P_IPV6); + + skb_pull(reply, sizeof(struct ethhdr)); + skb_set_network_header(reply, 0); + skb_put(reply, sizeof(struct ipv6hdr)); + + /* IPv6 header */ + + pip6 = ipv6_hdr(reply); + memset(pip6, 0, sizeof(struct ipv6hdr)); + pip6->version = 6; + pip6->priority = ipv6_hdr(request)->priority; + pip6->nexthdr = IPPROTO_ICMPV6; + pip6->hop_limit = 255; + pip6->daddr = ipv6_hdr(request)->saddr; + pip6->saddr = *(struct in6_addr *)n->primary_key; + + 
skb_pull(reply, sizeof(struct ipv6hdr)); + skb_set_transport_header(reply, 0); + + na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); + + /* Neighbor Advertisement */ + memset(na, 0, sizeof(*na)+na_olen); + na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; + na->icmph.icmp6_router = isrouter; + na->icmph.icmp6_override = 1; + na->icmph.icmp6_solicited = 1; + na->target = ns->target; + ether_addr_copy(&na->opt[2], n->ha); + na->opt[0] = ND_OPT_TARGET_LL_ADDR; + na->opt[1] = na_olen >> 3; + + na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, + &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, + csum_partial(na, sizeof(*na)+na_olen, 0)); + + pip6->payload_len = htons(sizeof(*na)+na_olen); + + skb_push(reply, sizeof(struct ipv6hdr)); + + reply->ip_summed = CHECKSUM_UNNECESSARY; + + return reply; +} + static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct neighbour *n; - union vxlan_addr ipa; + struct nd_msg *msg; const struct ipv6hdr *iphdr; const struct in6_addr *saddr, *daddr; - struct nd_msg *msg; - struct inet6_dev *in6_dev = NULL; + struct neighbour *n; + struct inet6_dev *in6_dev; in6_dev = __in6_dev_get(dev); if (!in6_dev) @@ -1363,19 +1451,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) saddr = &iphdr->saddr; daddr = &iphdr->daddr; - if (ipv6_addr_loopback(daddr) || - ipv6_addr_is_multicast(daddr)) - goto out; - msg = (struct nd_msg *)skb_transport_header(skb); if (msg->icmph.icmp6_code != 0 || msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) goto out; - n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev); + if (ipv6_addr_loopback(daddr) || + ipv6_addr_is_multicast(&msg->target)) + goto out; + + n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); if (n) { struct vxlan_fdb *f; + struct sk_buff *reply; if (!(n->nud_state & NUD_CONNECTED)) { neigh_release(n); @@ -1389,13 +1478,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) goto out; } - ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target, - !!in6_dev->cnf.forwarding, - true, false, false); + reply = vxlan_na_create(skb, n, + !!(f ? f->flags & NTF_ROUTER : 0)); + neigh_release(n); + + if (reply == NULL) + goto out; + + if (netif_rx_ni(reply) == NET_RX_DROP) + dev->stats.rx_dropped++; + } else if (vxlan->flags & VXLAN_F_L3MISS) { - ipa.sin6.sin6_addr = *daddr; - ipa.sa.sa_family = AF_INET6; + union vxlan_addr ipa = { + .sin6.sin6_addr = msg->target, + .sa.sa_family = AF_INET6, + }; + vxlan_ip_miss(dev, &ipa); } -- cgit v1.2.3 From a5d0e7c037119484a7006b883618bfa87996cb41 Mon Sep 17 00:00:00 2001 From: Erik Hugne Date: Mon, 24 Mar 2014 16:56:38 +0100 Subject: tipc: fix spinlock recursion bug for failed subscriptions If a topology event subscription fails for any reason, such as out of memory, max number reached or because we received an invalid request the correct behavior is to terminate the subscribers connection to the topology server. 
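Concretely, the subscribe attempt should only report failure while the subscriber lock is held; the connection teardown has to happen after the lock is dropped, because the teardown path takes the same lock again. A minimal sketch of that ordering (placeholder names, not the actual TIPC functions):

	/* Sketch only: try_subscribe() and terminate_connection() are
	 * stand-ins for the real TIPC code paths.
	 */
	static void handle_request(struct subscriber *s, void *req, size_t len)
	{
		int err;

		spin_lock_bh(&s->lock);
		err = try_subscribe(s, req, len);	/* must not re-take s->lock */
		spin_unlock_bh(&s->lock);

		if (err < 0)
			terminate_connection(s);	/* takes s->lock internally */
	}
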
This is currently broken and produces the following oops: [27.953662] tipc: Subscription rejected, illegal request [27.955329] BUG: spinlock recursion on CPU#1, kworker/u4:0/6 [27.957066] lock: 0xffff88003c67f408, .magic: dead4ead, .owner: kworker/u4:0/6, .owner_cpu: 1 [27.958054] CPU: 1 PID: 6 Comm: kworker/u4:0 Not tainted 3.14.0-rc6+ #5 [27.960230] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 [27.960874] Workqueue: tipc_rcv tipc_recv_work [tipc] [27.961430] ffff88003c67f408 ffff88003de27c18 ffffffff815c0207 ffff88003de1c050 [27.962292] ffff88003de27c38 ffffffff815beec5 ffff88003c67f408 ffffffff817f0a8a [27.963152] ffff88003de27c58 ffffffff815beeeb ffff88003c67f408 ffffffffa0013520 [27.964023] Call Trace: [27.964292] [] dump_stack+0x45/0x56 [27.964874] [] spin_dump+0x8c/0x91 [27.965420] [] spin_bug+0x21/0x26 [27.965995] [] do_raw_spin_lock+0x116/0x140 [27.966631] [] _raw_spin_lock_bh+0x15/0x20 [27.967256] [] subscr_conn_shutdown_event+0x20/0xa0 [tipc] [27.968051] [] tipc_close_conn+0xa4/0xb0 [tipc] [27.968722] [] tipc_conn_terminate+0x1a/0x30 [tipc] [27.969436] [] subscr_conn_msg_event+0x1f2/0x2f0 [tipc] [27.970209] [] tipc_receive_from_sock+0x90/0xf0 [tipc] [27.970972] [] tipc_recv_work+0x29/0x50 [tipc] [27.971633] [] process_one_work+0x165/0x3e0 [27.972267] [] worker_thread+0x119/0x3a0 [27.972896] [] ? manage_workers.isra.25+0x2a0/0x2a0 [27.973622] [] kthread+0xdf/0x100 [27.974168] [] ? kthread_create_on_node+0x1a0/0x1a0 [27.974893] [] ret_from_fork+0x7c/0xb0 [27.975466] [] ? kthread_create_on_node+0x1a0/0x1a0 The recursion occurs when subscr_terminate tries to grab the subscriber lock, which is already taken by subscr_conn_msg_event. We fix this by checking if the request to establish a new subscription was successful, and if not we initiate termination of the subscriber after we have released the subscriber lock. Signed-off-by: Erik Hugne Reviewed-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/subscr.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 11c9ae00837d..642437231ad5 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -263,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s, * * Called with subscriber lock held. 
*/ -static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, - struct tipc_subscriber *subscriber) -{ +static int subscr_subscribe(struct tipc_subscr *s, + struct tipc_subscriber *subscriber, + struct tipc_subscription **sub_p) { struct tipc_subscription *sub; int swap; @@ -276,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); subscr_cancel(s, subscriber); - return NULL; + return 0; } /* Refuse subscription if global limit exceeded */ if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) { pr_warn("Subscription rejected, limit reached (%u)\n", TIPC_MAX_SUBSCRIPTIONS); - subscr_terminate(subscriber); - return NULL; + return -EINVAL; } /* Allocate subscription object */ sub = kmalloc(sizeof(*sub), GFP_ATOMIC); if (!sub) { pr_warn("Subscription rejected, no memory\n"); - subscr_terminate(subscriber); - return NULL; + return -ENOMEM; } /* Initialize subscription object */ @@ -306,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, (sub->seq.lower > sub->seq.upper)) { pr_warn("Subscription rejected, illegal request\n"); kfree(sub); - subscr_terminate(subscriber); - return NULL; + return -EINVAL; } INIT_LIST_HEAD(&sub->nameseq_list); list_add(&sub->subscription_list, &subscriber->subscription_list); @@ -320,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, (Handler)subscr_timeout, (unsigned long)sub); k_start_timer(&sub->timer, sub->timeout); } - - return sub; + *sub_p = sub; + return 0; } /* Handle one termination request for the subscriber */ @@ -335,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr, void *usr_data, void *buf, size_t len) { struct tipc_subscriber *subscriber = usr_data; - struct tipc_subscription *sub; + struct tipc_subscription *sub = NULL; spin_lock_bh(&subscriber->lock); - sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber); + if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) { + spin_unlock_bh(&subscriber->lock); + subscr_terminate(subscriber); + return; + } if (sub) tipc_nametbl_subscribe(sub); spin_unlock_bh(&subscriber->lock); -- cgit v1.2.3
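
A footnote on the three NLM_F_MULTI fixes above (rtnetlink, ipmr, ip6mr): the flag promises a multipart stream terminated by NLMSG_DONE, and only the dump path ever emits that terminator, so a lone notification carrying NLM_F_MULTI leaves libraries like libnl waiting forever. A condensed sketch of the distinction, not taken from any of the patched files:

	/* Dump path: each message carries NLM_F_MULTI and the netlink core
	 * appends NLMSG_DONE once the dump callback signals completion.
	 * Notification path: a single self-contained message, so it must
	 * carry no NLM_F_MULTI because no NLMSG_DONE will ever follow.
	 */
	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm),
			for_dump ? NLM_F_MULTI : 0);	/* for_dump: hypothetical flag */
	if (!nlh)
		return -EMSGSIZE;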