Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c | 8
-rw-r--r--  net/core/neighbour.c | 4
-rw-r--r--  net/dccp/proto.c | 3
-rw-r--r--  net/ipv4/inet_connection_sock.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 4
-rw-r--r--  net/ipv4/proc.c | 13
-rw-r--r--  net/ipv4/route.c | 4
-rw-r--r--  net/ipv4/tcp.c | 3
-rw-r--r--  net/ipv4/tcp_ipv4.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_lc.c | 14
-rw-r--r--  net/netfilter/ipvs/ip_vs_nq.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_rr.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_sed.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_sh.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_wlc.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_wrr.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 4
-rw-r--r--  net/sched/Kconfig | 3
-rw-r--r--  net/sched/cls_cgroup.c | 23
-rw-r--r--  net/sunrpc/auth.c | 6
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 295
-rw-r--r--  net/sunrpc/auth_gss/gss_generic_token.c | 6
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 18
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 28
-rw-r--r--  net/sunrpc/clnt.c | 16
-rw-r--r--  net/sunrpc/rpc_pipe.c | 42
-rw-r--r--  net/sunrpc/xdr.c | 50
-rw-r--r--  net/xfrm/xfrm_proc.c | 17
31 files changed, 424 insertions(+), 167 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 446424027d24..09c66a449da6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5066,13 +5066,14 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
static void __net_exit default_device_exit(struct net *net)
{
- struct net_device *dev, *next;
+ struct net_device *dev;
/*
* Push all migratable network devices back to the
* initial network namespace
*/
rtnl_lock();
- for_each_netdev_safe(net, dev, next) {
+restart:
+ for_each_netdev(net, dev) {
int err;
char fb_name[IFNAMSIZ];
@@ -5083,7 +5084,7 @@ static void __net_exit default_device_exit(struct net *net)
/* Delete virtual devices */
if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
dev->rtnl_link_ops->dellink(dev);
- continue;
+ goto restart;
}
/* Push remaining network devices to init_net */
@@ -5094,6 +5095,7 @@ static void __net_exit default_device_exit(struct net *net)
__func__, dev->name, err);
BUG();
}
+ goto restart;
}
rtnl_unlock();
}
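
A minimal userspace sketch of the pattern the default_device_exit() hunk above adopts: once the per-device operation (dellink() or the namespace move) may remove or relocate devices other than the current one, the saved "next" pointer of a _safe iterator can go stale, so the walk restarts from the head after every modification. Everything below (struct node, remove_odd) is invented for illustration, not taken from this patch.

/* Restart-after-modification list walk, userspace analogue. */
#include <stdio.h>
#include <stdlib.h>

struct node {
    int val;
    struct node *next;
};

static void remove_odd(struct node **head)
{
restart:
    for (struct node **pp = head; *pp != NULL; pp = &(*pp)->next) {
        if ((*pp)->val & 1) {
            struct node *victim = *pp;
            *pp = victim->next;   /* stands in for dellink()/namespace move */
            free(victim);
            goto restart;         /* any saved iterator may now be stale */
        }
    }
}

int main(void)
{
    struct node *head = NULL;
    for (int i = 5; i >= 1; i--) {
        struct node *n = malloc(sizeof(*n));
        n->val = i;
        n->next = head;
        head = n;
    }
    remove_odd(&head);
    for (struct node *n = head; n; n = n->next)
        printf("%d ", n->val);    /* prints: 2 4 */
    putchar('\n');
    return 0;
}
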
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9c3717a23cf7..f66c58df8953 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2414,7 +2414,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
@@ -2429,7 +2429,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct neigh_table *tbl = pde->data;
int cpu;
- for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
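
The seq_file start/next helpers here (and in the conntrack and route.c hunks below) now bound the scan by nr_cpu_ids, one more than the highest possible CPU id, rather than by the compile-time maximum NR_CPUS. A trivial userspace sketch of the same bound change; the names and numbers are invented:

#include <stdio.h>

#define MAX_IDS 4096                 /* plays the role of NR_CPUS */

static int nr_ids = 8;               /* plays the role of nr_cpu_ids */
static int id_possible(int id) { return id < nr_ids; }

int main(void)
{
    int visited = 0;

    for (int id = 0; id < nr_ids; ++id) {   /* was: id < MAX_IDS */
        if (!id_possible(id))
            continue;
        visited++;
    }
    printf("scanned %d ids instead of %d\n", visited, MAX_IDS);
    return 0;
}
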
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index d5c2bacb713c..1747ccae8e8d 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -964,7 +964,6 @@ adjudge_to_death:
state = sk->sk_state;
sock_hold(sk);
sock_orphan(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count);
/*
* It is the last release_sock in its life. It will remove backlog.
@@ -978,6 +977,8 @@ adjudge_to_death:
bh_lock_sock(sk);
WARN_ON(sock_owned_by_user(sk));
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+
/* Have we already been destroyed by a softirq or backlog? */
if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
goto out;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c7cda1ca8e65..f26ab38680de 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -633,8 +633,6 @@ void inet_csk_listen_stop(struct sock *sk)
acc_req = req->dl_next;
- percpu_counter_inc(sk->sk_prot->orphan_count);
-
local_bh_disable();
bh_lock_sock(child);
WARN_ON(sock_owned_by_user(child));
@@ -644,6 +642,8 @@ void inet_csk_listen_stop(struct sock *sk)
sock_orphan(child);
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+
inet_csk_destroy_sock(child);
bh_unlock_sock(child);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 313ebf00ee36..6ba5c557690c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -291,7 +291,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
@@ -306,7 +306,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct net *net = seq_file_net(seq);
int cpu;
- for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 614958b7c276..eb62e58bff79 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -38,6 +38,7 @@
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
+#include <linux/bottom_half.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -50,13 +51,17 @@
static int sockstat_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
+ int orphans, sockets;
+
+ local_bh_disable();
+ orphans = percpu_counter_sum_positive(&tcp_orphan_count);
+ sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
+ local_bh_enable();
socket_seq_show(seq);
seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
- sock_prot_inuse_get(net, &tcp_prot),
- (int)percpu_counter_sum_positive(&tcp_orphan_count),
- tcp_death_row.tw_count,
- (int)percpu_counter_sum_positive(&tcp_sockets_allocated),
+ sock_prot_inuse_get(net, &tcp_prot), orphans,
+ tcp_death_row.tw_count, sockets,
atomic_read(&tcp_memory_allocated));
seq_printf(seq, "UDP: inuse %d mem %d\n",
sock_prot_inuse_get(net, &udp_prot),
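
The proc.c hunk takes the two percpu_counter sums with bottom halves disabled, because the counters are also updated from softirq context, and only afterwards formats the output line. A userspace sketch of the idea, "sum the distributed counters while their updaters are excluded", with a pthread mutex standing in for local_bh_disable()/local_bh_enable(); all names are invented:

#include <pthread.h>
#include <stdio.h>

#define NCTX 4

static long counter[NCTX];
static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;

static long counters_sum_positive(void)
{
    long sum = 0;

    pthread_mutex_lock(&counter_lock);    /* analogue of local_bh_disable() */
    for (int i = 0; i < NCTX; i++)
        sum += counter[i];
    pthread_mutex_unlock(&counter_lock);  /* analogue of local_bh_enable() */
    return sum > 0 ? sum : 0;             /* per-context deltas may be negative */
}

int main(void)
{
    counter[0] = 3;
    counter[2] = -1;
    printf("orphans: %ld\n", counters_sum_positive());
    return 0;
}
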
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 77bfba975959..97f71153584f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
@@ -442,7 +442,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
int cpu;
- for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1f3d52946b3b..f28acf11fc67 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1836,7 +1836,6 @@ adjudge_to_death:
state = sk->sk_state;
sock_hold(sk);
sock_orphan(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count);
/* It is the last release_sock in its life. It will remove backlog. */
release_sock(sk);
@@ -1849,6 +1848,8 @@ adjudge_to_death:
bh_lock_sock(sk);
WARN_ON(sock_owned_by_user(sk));
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
goto out;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 10172487921b..9d839fa9331e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -51,6 +51,7 @@
*/
+#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
@@ -1797,7 +1798,9 @@ static int tcp_v4_init_sock(struct sock *sk)
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
+ local_bh_disable();
percpu_counter_inc(&tcp_sockets_allocated);
+ local_bh_enable();
return 0;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8702b06cb60a..e8b8337a8310 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -23,6 +23,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -1830,7 +1831,9 @@ static int tcp_v6_init_sock(struct sock *sk)
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
+ local_bh_disable();
percpu_counter_inc(&tcp_sockets_allocated);
+ local_bh_enable();
return 0;
}
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 9394f539966a..3eb5e2660c49 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -507,7 +507,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* No cache entry or it is invalid, time to schedule */
dest = __ip_vs_lblc_schedule(svc);
if (!dest) {
- IP_VS_DBG(1, "no destination available\n");
+ IP_VS_ERR_RL("LBLC: no destination available\n");
return NULL;
}
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 92dc76a6842c..c04ce56c7f0f 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -690,7 +690,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* The cache entry is invalid, time to schedule */
dest = __ip_vs_lblcr_schedule(svc);
if (!dest) {
- IP_VS_DBG(1, "no destination available\n");
+ IP_VS_ERR_RL("LBLCR: no destination available\n");
read_unlock(&svc->sched_lock);
return NULL;
}
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index 51912cab777b..d0dadc8a65fd 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -66,11 +66,15 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
}
}
- if (least)
- IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n",
- IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
- atomic_read(&least->activeconns),
- atomic_read(&least->inactconns));
+ if (!least)
+ IP_VS_ERR_RL("LC: no destination available\n");
+ else
+ IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
+ "inactconns %d\n",
+ IP_VS_DBG_ADDR(svc->af, &least->addr),
+ ntohs(least->port),
+ atomic_read(&least->activeconns),
+ atomic_read(&least->inactconns));
return least;
}
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index 6758ad2ceaaf..694952db5026 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -95,8 +95,10 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
}
}
- if (!least)
+ if (!least) {
+ IP_VS_ERR_RL("NQ: no destination available\n");
return NULL;
+ }
out:
IP_VS_DBG_BUF(6, "NQ: server %s:%u "
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 8fb51c169eb8..2d16ab7f8c1e 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -69,6 +69,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
q = q->next;
} while (q != p);
write_unlock(&svc->sched_lock);
+ IP_VS_ERR_RL("RR: no destination available\n");
return NULL;
out:
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index 691a6a0086e1..20e4657d2f3b 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -84,6 +84,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
goto nextstage;
}
}
+ IP_VS_ERR_RL("SED: no destination available\n");
return NULL;
/*
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 0e53955ef139..75709ebeb630 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -219,6 +219,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
|| !(dest->flags & IP_VS_DEST_F_AVAILABLE)
|| atomic_read(&dest->weight) <= 0
|| is_overloaded(dest)) {
+ IP_VS_ERR_RL("SH: no destination available\n");
return NULL;
}
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 57b452bbb4ea..8e942565b47d 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -72,6 +72,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
goto nextstage;
}
}
+ IP_VS_ERR_RL("WLC: no destination available\n");
return NULL;
/*
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 2f618dc29c5b..f7d74ef1ecf9 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -155,6 +155,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
if (mark->cl == mark->cl->next) {
/* no dest entry */
+ IP_VS_ERR_RL("WRR: no destination available: "
+ "no destinations present\n");
dest = NULL;
goto out;
}
@@ -168,8 +170,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
*/
if (mark->cw == 0) {
mark->cl = &svc->destinations;
- IP_VS_ERR_RL("ip_vs_wrr_schedule(): "
- "no available servers\n");
+ IP_VS_ERR_RL("WRR: no destination "
+ "available\n");
dest = NULL;
goto out;
}
@@ -191,6 +193,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* back to the start, and no dest is found.
It is only possible when all dests are OVERLOADED */
dest = NULL;
+ IP_VS_ERR_RL("WRR: no destination available: "
+ "all destinations are overloaded\n");
goto out;
}
}
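
The IPVS schedulers above now report the "no destination available" case through IP_VS_ERR_RL, a rate-limited error macro; warn_gssd(), added later in this series, implements its own simple time-based ratelimit. A userspace sketch of that time-based approach, with invented names and time() standing in for jiffies:

#include <stdio.h>
#include <time.h>

static void warn_no_destination(const char *sched)
{
    static time_t ratelimit;
    time_t now = time(NULL);

    if (now >= ratelimit) {            /* roughly time_after(now, ratelimit) */
        fprintf(stderr, "%s: no destination available\n", sched);
        ratelimit = now + 15;          /* analogue of now + 15*HZ */
    }
}

int main(void)
{
    for (int i = 0; i < 1000000; i++)
        warn_no_destination("WRR");    /* prints once, not a million times */
    return 0;
}
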
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f37b9b74c6a8..4da54b0b9233 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -200,7 +200,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
@@ -215,7 +215,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct net *net = seq_file_net(seq);
int cpu;
- for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 4f7ef0db302b..929218a47620 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -335,9 +335,6 @@ config NET_CLS_CGROUP
Say Y here if you want to classify packets based on the control
cgroup of their process.
- To compile this code as a module, choose M here: the
- module will be called cls_cgroup.
-
config NET_EMATCH
bool "Extended Matches"
select NET_CLS
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 0d68b1975983..91a3db4a76f8 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -24,10 +24,16 @@ struct cgroup_cls_state
u32 classid;
};
-static inline struct cgroup_cls_state *net_cls_state(struct cgroup *cgrp)
+static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
- return (struct cgroup_cls_state *)
- cgroup_subsys_state(cgrp, net_cls_subsys_id);
+ return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
+ struct cgroup_cls_state, css);
+}
+
+static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
+{
+ return container_of(task_subsys_state(p, net_cls_subsys_id),
+ struct cgroup_cls_state, css);
}
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
@@ -39,19 +45,19 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
return ERR_PTR(-ENOMEM);
if (cgrp->parent)
- cs->classid = net_cls_state(cgrp->parent)->classid;
+ cs->classid = cgrp_cls_state(cgrp->parent)->classid;
return &cs->css;
}
static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
- kfree(ss);
+ kfree(cgrp_cls_state(cgrp));
}
static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
- return net_cls_state(cgrp)->classid;
+ return cgrp_cls_state(cgrp)->classid;
}
static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
@@ -59,7 +65,7 @@ static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
if (!cgroup_lock_live_group(cgrp))
return -ENODEV;
- net_cls_state(cgrp)->classid = (u32) value;
+ cgrp_cls_state(cgrp)->classid = (u32) value;
cgroup_unlock();
@@ -115,8 +121,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
return -1;
rcu_read_lock();
- cs = (struct cgroup_cls_state *) task_subsys_state(current,
- net_cls_subsys_id);
+ cs = task_cls_state(current);
if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
res->classid = cs->classid;
res->class = 0;
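
The cls_cgroup.c change replaces raw casts with container_of()-based helpers: the cast only happened to work because the css member sits first in struct cgroup_cls_state, whereas container_of() recovers the containing object wherever the member lives. A userspace sketch of the idiom; every name below is invented for illustration:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct css {                 /* stands in for cgroup_subsys_state */
    int refcnt;
};

struct cls_state {           /* stands in for cgroup_cls_state */
    struct css css;          /* embedded member */
    unsigned int classid;
};

static struct cls_state *to_cls_state(struct css *css)
{
    return container_of(css, struct cls_state, css);
}

int main(void)
{
    struct cls_state st = { .css = { .refcnt = 1 }, .classid = 0x10001 };
    struct css *p = &st.css;     /* what the generic layer hands back */

    printf("classid = 0x%x\n", to_cls_state(p)->classid);
    return 0;
}
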
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 0443f8349458..0c431c277af5 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -234,7 +234,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {
/* Enforce a 60 second garbage collection moratorium */
- if (time_in_range(cred->cr_expire, expired, jiffies) &&
+ if (time_in_range_open(cred->cr_expire, expired, jiffies) &&
test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
continue;
@@ -515,7 +515,7 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
if (cred->cr_ops->crwrap_req)
return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
/* By default, we encode the arguments normally. */
- return rpc_call_xdrproc(encode, rqstp, data, obj);
+ return encode(rqstp, data, obj);
}
int
@@ -530,7 +530,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
data, obj);
/* By default, we decode the arguments normally. */
- return rpc_call_xdrproc(decode, rqstp, data, obj);
+ return decode(rqstp, data, obj);
}
int
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 853a4142cea1..e630b38a6047 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -72,11 +72,25 @@ struct gss_auth {
struct gss_api_mech *mech;
enum rpc_gss_svc service;
struct rpc_clnt *client;
- struct dentry *dentry;
+ /*
+ * There are two upcall pipes; dentry[1], named "gssd", is used
+ * for the new text-based upcall; dentry[0] is named after the
+ * mechanism (for example, "krb5") and exists for
+ * backwards-compatibility with older gssd's.
+ */
+ struct dentry *dentry[2];
};
+/* pipe_version >= 0 if and only if someone has a pipe open. */
+static int pipe_version = -1;
+static atomic_t pipe_users = ATOMIC_INIT(0);
+static DEFINE_SPINLOCK(pipe_version_lock);
+static struct rpc_wait_queue pipe_version_rpc_waitqueue;
+static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
+
static void gss_free_ctx(struct gss_cl_ctx *);
-static struct rpc_pipe_ops gss_upcall_ops;
+static struct rpc_pipe_ops gss_upcall_ops_v0;
+static struct rpc_pipe_ops gss_upcall_ops_v1;
static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
@@ -220,6 +234,7 @@ err:
return p;
}
+#define UPCALL_BUF_LEN 128
struct gss_upcall_msg {
atomic_t count;
@@ -227,16 +242,41 @@ struct gss_upcall_msg {
struct rpc_pipe_msg msg;
struct list_head list;
struct gss_auth *auth;
+ struct rpc_inode *inode;
struct rpc_wait_queue rpc_waitqueue;
wait_queue_head_t waitqueue;
struct gss_cl_ctx *ctx;
+ char databuf[UPCALL_BUF_LEN];
};
+static int get_pipe_version(void)
+{
+ int ret;
+
+ spin_lock(&pipe_version_lock);
+ if (pipe_version >= 0) {
+ atomic_inc(&pipe_users);
+ ret = pipe_version;
+ } else
+ ret = -EAGAIN;
+ spin_unlock(&pipe_version_lock);
+ return ret;
+}
+
+static void put_pipe_version(void)
+{
+ if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
+ pipe_version = -1;
+ spin_unlock(&pipe_version_lock);
+ }
+}
+
static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
if (!atomic_dec_and_test(&gss_msg->count))
return;
+ put_pipe_version();
BUG_ON(!list_empty(&gss_msg->list));
if (gss_msg->ctx != NULL)
gss_put_ctx(gss_msg->ctx);
@@ -266,8 +306,8 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
{
- struct inode *inode = gss_auth->dentry->d_inode;
- struct rpc_inode *rpci = RPC_I(inode);
+ struct rpc_inode *rpci = gss_msg->inode;
+ struct inode *inode = &rpci->vfs_inode;
struct gss_upcall_msg *old;
spin_lock(&inode->i_lock);
@@ -293,8 +333,7 @@ __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
- struct gss_auth *gss_auth = gss_msg->auth;
- struct inode *inode = gss_auth->dentry->d_inode;
+ struct inode *inode = &gss_msg->inode->vfs_inode;
if (list_empty(&gss_msg->list))
return;
@@ -310,7 +349,7 @@ gss_upcall_callback(struct rpc_task *task)
struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
struct gss_cred, gc_base);
struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
- struct inode *inode = gss_msg->auth->dentry->d_inode;
+ struct inode *inode = &gss_msg->inode->vfs_inode;
spin_lock(&inode->i_lock);
if (gss_msg->ctx)
@@ -323,22 +362,75 @@ gss_upcall_callback(struct rpc_task *task)
gss_release_msg(gss_msg);
}
+static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
+{
+ gss_msg->msg.data = &gss_msg->uid;
+ gss_msg->msg.len = sizeof(gss_msg->uid);
+}
+
+static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
+ struct rpc_clnt *clnt, int machine_cred)
+{
+ char *p = gss_msg->databuf;
+ int len = 0;
+
+ gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
+ gss_msg->auth->mech->gm_name,
+ gss_msg->uid);
+ p += gss_msg->msg.len;
+ if (clnt->cl_principal) {
+ len = sprintf(p, "target=%s ", clnt->cl_principal);
+ p += len;
+ gss_msg->msg.len += len;
+ }
+ if (machine_cred) {
+ len = sprintf(p, "service=* ");
+ p += len;
+ gss_msg->msg.len += len;
+ } else if (!strcmp(clnt->cl_program->name, "nfs4_cb")) {
+ len = sprintf(p, "service=nfs ");
+ p += len;
+ gss_msg->msg.len += len;
+ }
+ len = sprintf(p, "\n");
+ gss_msg->msg.len += len;
+
+ gss_msg->msg.data = gss_msg->databuf;
+ BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
+}
+
+static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
+ struct rpc_clnt *clnt, int machine_cred)
+{
+ if (pipe_version == 0)
+ gss_encode_v0_msg(gss_msg);
+ else /* pipe_version == 1 */
+ gss_encode_v1_msg(gss_msg, clnt, machine_cred);
+}
+
static inline struct gss_upcall_msg *
-gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
+gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
+ int machine_cred)
{
struct gss_upcall_msg *gss_msg;
+ int vers;
gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
- if (gss_msg != NULL) {
- INIT_LIST_HEAD(&gss_msg->list);
- rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
- init_waitqueue_head(&gss_msg->waitqueue);
- atomic_set(&gss_msg->count, 1);
- gss_msg->msg.data = &gss_msg->uid;
- gss_msg->msg.len = sizeof(gss_msg->uid);
- gss_msg->uid = uid;
- gss_msg->auth = gss_auth;
+ if (gss_msg == NULL)
+ return ERR_PTR(-ENOMEM);
+ vers = get_pipe_version();
+ if (vers < 0) {
+ kfree(gss_msg);
+ return ERR_PTR(vers);
}
+ gss_msg->inode = RPC_I(gss_auth->dentry[vers]->d_inode);
+ INIT_LIST_HEAD(&gss_msg->list);
+ rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
+ init_waitqueue_head(&gss_msg->waitqueue);
+ atomic_set(&gss_msg->count, 1);
+ gss_msg->uid = uid;
+ gss_msg->auth = gss_auth;
+ gss_encode_msg(gss_msg, clnt, machine_cred);
return gss_msg;
}
@@ -350,16 +442,13 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
struct gss_upcall_msg *gss_new, *gss_msg;
uid_t uid = cred->cr_uid;
- /* Special case: rpc.gssd assumes that uid == 0 implies machine creds */
- if (gss_cred->gc_machine_cred != 0)
- uid = 0;
-
- gss_new = gss_alloc_msg(gss_auth, uid);
- if (gss_new == NULL)
- return ERR_PTR(-ENOMEM);
+ gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
+ if (IS_ERR(gss_new))
+ return gss_new;
gss_msg = gss_add_msg(gss_auth, gss_new);
if (gss_msg == gss_new) {
- int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg);
+ struct inode *inode = &gss_new->inode->vfs_inode;
+ int res = rpc_queue_upcall(inode, &gss_new->msg);
if (res) {
gss_unhash_msg(gss_new);
gss_msg = ERR_PTR(res);
@@ -369,6 +458,18 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
return gss_msg;
}
+static void warn_gssd(void)
+{
+ static unsigned long ratelimit;
+ unsigned long now = jiffies;
+
+ if (time_after(now, ratelimit)) {
+ printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
+ "Please check user daemon is running.\n");
+ ratelimit = now + 15*HZ;
+ }
+}
+
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
@@ -378,16 +479,25 @@ gss_refresh_upcall(struct rpc_task *task)
struct gss_cred *gss_cred = container_of(cred,
struct gss_cred, gc_base);
struct gss_upcall_msg *gss_msg;
- struct inode *inode = gss_auth->dentry->d_inode;
+ struct inode *inode;
int err = 0;
dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
cred->cr_uid);
gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
+ if (PTR_ERR(gss_msg) == -EAGAIN) {
+ /* XXX: warn only on the first failure, under the assumption that we
+ * shouldn't normally hit this case on a refresh. */
+ warn_gssd();
+ task->tk_timeout = 15*HZ;
+ rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
+ return 0;
+ }
if (IS_ERR(gss_msg)) {
err = PTR_ERR(gss_msg);
goto out;
}
+ inode = &gss_msg->inode->vfs_inode;
spin_lock(&inode->i_lock);
if (gss_cred->gc_upcall != NULL)
rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
@@ -414,18 +524,29 @@ out:
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
- struct inode *inode = gss_auth->dentry->d_inode;
+ struct inode *inode;
struct rpc_cred *cred = &gss_cred->gc_base;
struct gss_upcall_msg *gss_msg;
DEFINE_WAIT(wait);
int err = 0;
dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
+retry:
gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
+ if (PTR_ERR(gss_msg) == -EAGAIN) {
+ err = wait_event_interruptible_timeout(pipe_version_waitqueue,
+ pipe_version >= 0, 15*HZ);
+ if (err < 0)
+ goto out;
+ if (pipe_version < 0)
+ warn_gssd();
+ goto retry;
+ }
if (IS_ERR(gss_msg)) {
err = PTR_ERR(gss_msg);
goto out;
}
+ inode = &gss_msg->inode->vfs_inode;
for (;;) {
prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
spin_lock(&inode->i_lock);
@@ -543,6 +664,38 @@ out:
return err;
}
+static int gss_pipe_open(struct inode *inode, int new_version)
+{
+ int ret = 0;
+
+ spin_lock(&pipe_version_lock);
+ if (pipe_version < 0) {
+ /* First open of any gss pipe determines the version: */
+ pipe_version = new_version;
+ rpc_wake_up(&pipe_version_rpc_waitqueue);
+ wake_up(&pipe_version_waitqueue);
+ } else if (pipe_version != new_version) {
+ /* Trying to open a pipe of a different version */
+ ret = -EBUSY;
+ goto out;
+ }
+ atomic_inc(&pipe_users);
+out:
+ spin_unlock(&pipe_version_lock);
+ return ret;
+
+}
+
+static int gss_pipe_open_v0(struct inode *inode)
+{
+ return gss_pipe_open(inode, 0);
+}
+
+static int gss_pipe_open_v1(struct inode *inode)
+{
+ return gss_pipe_open(inode, 1);
+}
+
static void
gss_pipe_release(struct inode *inode)
{
@@ -562,27 +715,22 @@ gss_pipe_release(struct inode *inode)
spin_lock(&inode->i_lock);
}
spin_unlock(&inode->i_lock);
+
+ put_pipe_version();
}
static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
- static unsigned long ratelimit;
if (msg->errno < 0) {
dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n",
gss_msg);
atomic_inc(&gss_msg->count);
gss_unhash_msg(gss_msg);
- if (msg->errno == -ETIMEDOUT) {
- unsigned long now = jiffies;
- if (time_after(now, ratelimit)) {
- printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
- "Please check user daemon is running!\n");
- ratelimit = now + 15*HZ;
- }
- }
+ if (msg->errno == -ETIMEDOUT)
+ warn_gssd();
gss_release_msg(gss_msg);
}
}
@@ -623,20 +771,38 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
atomic_set(&auth->au_count, 1);
kref_init(&gss_auth->kref);
- gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name,
- clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
- if (IS_ERR(gss_auth->dentry)) {
- err = PTR_ERR(gss_auth->dentry);
+ /*
+ * Note: if we created the old pipe first, then someone who
+ * examined the directory at the right moment might conclude
+ * that we supported only the old pipe. So we instead create
+ * the new pipe first.
+ */
+ gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_dentry,
+ "gssd",
+ clnt, &gss_upcall_ops_v1,
+ RPC_PIPE_WAIT_FOR_OPEN);
+ if (IS_ERR(gss_auth->dentry[1])) {
+ err = PTR_ERR(gss_auth->dentry[1]);
goto err_put_mech;
}
+ gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_dentry,
+ gss_auth->mech->gm_name,
+ clnt, &gss_upcall_ops_v0,
+ RPC_PIPE_WAIT_FOR_OPEN);
+ if (IS_ERR(gss_auth->dentry[0])) {
+ err = PTR_ERR(gss_auth->dentry[0]);
+ goto err_unlink_pipe_1;
+ }
err = rpcauth_init_credcache(auth);
if (err)
- goto err_unlink_pipe;
+ goto err_unlink_pipe_0;
return auth;
-err_unlink_pipe:
- rpc_unlink(gss_auth->dentry);
+err_unlink_pipe_0:
+ rpc_unlink(gss_auth->dentry[0]);
+err_unlink_pipe_1:
+ rpc_unlink(gss_auth->dentry[1]);
err_put_mech:
gss_mech_put(gss_auth->mech);
err_free:
@@ -649,8 +815,8 @@ out_dec:
static void
gss_free(struct gss_auth *gss_auth)
{
- rpc_unlink(gss_auth->dentry);
- gss_auth->dentry = NULL;
+ rpc_unlink(gss_auth->dentry[1]);
+ rpc_unlink(gss_auth->dentry[0]);
gss_mech_put(gss_auth->mech);
kfree(gss_auth);
@@ -693,7 +859,7 @@ gss_destroying_context(struct rpc_cred *cred)
struct rpc_task *task;
if (gss_cred->gc_ctx == NULL ||
- test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
+ test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
return 0;
gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
@@ -757,14 +923,12 @@ gss_free_cred_callback(struct rcu_head *head)
}
static void
-gss_destroy_cred(struct rpc_cred *cred)
+gss_destroy_nullcred(struct rpc_cred *cred)
{
struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
- if (gss_destroying_context(cred))
- return;
rcu_assign_pointer(gss_cred->gc_ctx, NULL);
call_rcu(&cred->cr_rcu, gss_free_cred_callback);
if (ctx)
@@ -772,6 +936,15 @@ gss_destroy_cred(struct rpc_cred *cred)
kref_put(&gss_auth->kref, gss_free_callback);
}
+static void
+gss_destroy_cred(struct rpc_cred *cred)
+{
+
+ if (gss_destroying_context(cred))
+ return;
+ gss_destroy_nullcred(cred);
+}
+
/*
* Lookup RPCSEC_GSS cred for the current process
*/
@@ -1017,7 +1190,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
- status = rpc_call_xdrproc(encode, rqstp, p, obj);
+ status = encode(rqstp, p, obj);
if (status)
return status;
@@ -1111,7 +1284,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
- status = rpc_call_xdrproc(encode, rqstp, p, obj);
+ status = encode(rqstp, p, obj);
if (status)
return status;
@@ -1170,12 +1343,12 @@ gss_wrap_req(struct rpc_task *task,
/* The spec seems a little ambiguous here, but I think that not
* wrapping context destruction requests makes the most sense.
*/
- status = rpc_call_xdrproc(encode, rqstp, p, obj);
+ status = encode(rqstp, p, obj);
goto out;
}
switch (gss_cred->gc_service) {
case RPC_GSS_SVC_NONE:
- status = rpc_call_xdrproc(encode, rqstp, p, obj);
+ status = encode(rqstp, p, obj);
break;
case RPC_GSS_SVC_INTEGRITY:
status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1291,7 +1464,7 @@ gss_unwrap_resp(struct rpc_task *task,
cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
+ (savedlen - head->iov_len);
out_decode:
- status = rpc_call_xdrproc(decode, rqstp, p, obj);
+ status = decode(rqstp, p, obj);
out:
gss_put_ctx(ctx);
dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
@@ -1324,7 +1497,7 @@ static const struct rpc_credops gss_credops = {
static const struct rpc_credops gss_nullops = {
.cr_name = "AUTH_GSS",
- .crdestroy = gss_destroy_cred,
+ .crdestroy = gss_destroy_nullcred,
.crbind = rpcauth_generic_bind_cred,
.crmatch = gss_match,
.crmarshal = gss_marshal,
@@ -1334,10 +1507,19 @@ static const struct rpc_credops gss_nullops = {
.crunwrap_resp = gss_unwrap_resp,
};
-static struct rpc_pipe_ops gss_upcall_ops = {
+static struct rpc_pipe_ops gss_upcall_ops_v0 = {
+ .upcall = gss_pipe_upcall,
+ .downcall = gss_pipe_downcall,
+ .destroy_msg = gss_pipe_destroy_msg,
+ .open_pipe = gss_pipe_open_v0,
+ .release_pipe = gss_pipe_release,
+};
+
+static struct rpc_pipe_ops gss_upcall_ops_v1 = {
.upcall = gss_pipe_upcall,
.downcall = gss_pipe_downcall,
.destroy_msg = gss_pipe_destroy_msg,
+ .open_pipe = gss_pipe_open_v1,
.release_pipe = gss_pipe_release,
};
@@ -1354,6 +1536,7 @@ static int __init init_rpcsec_gss(void)
err = gss_svc_init();
if (err)
goto out_unregister;
+ rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
return 0;
out_unregister:
rpcauth_unregister(&authgss_ops);
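
The auth_gss.c changes above introduce the pipe_version/pipe_users bookkeeping: the first pipe open fixes the upcall version, an open of the other version is refused with -EBUSY, and the last user dropping off resets the version to -1. A userspace sketch of that scheme, loosely modelled on gss_pipe_open() and put_pipe_version(); a pthread mutex stands in for the spinlock, a plain int for the atomic counter, and all names are invented:

#include <pthread.h>
#include <stdio.h>

static int version = -1;      /* -1: nobody has a pipe open */
static int users;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int pipe_open(int new_version)
{
    int ret = 0;

    pthread_mutex_lock(&lock);
    if (version < 0)
        version = new_version;   /* first open decides the version */
    else if (version != new_version)
        ret = -1;                /* mixing versions is refused (-EBUSY) */
    if (ret == 0)
        users++;
    pthread_mutex_unlock(&lock);
    return ret;
}

static void pipe_release(void)
{
    pthread_mutex_lock(&lock);
    if (--users == 0)
        version = -1;            /* last user gone: version unknown again */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    printf("open v1: %d\n", pipe_open(1));   /* 0  */
    printf("open v0: %d\n", pipe_open(0));   /* -1 */
    pipe_release();
    printf("open v0: %d\n", pipe_open(0));   /* 0  */
    return 0;
}
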
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index d83b881685fe..c0ba39c4f5f2 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -152,7 +152,7 @@ g_token_size(struct xdr_netobj *mech, unsigned int body_size)
return(1 + der_length_size(body_size) + body_size);
}
-EXPORT_SYMBOL(g_token_size);
+EXPORT_SYMBOL_GPL(g_token_size);
/* fills in a buffer with the token header. The buffer is assumed to
be the right size. buf is advanced past the token header */
@@ -167,7 +167,7 @@ g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf)
TWRITE_STR(*buf, mech->data, ((int) mech->len));
}
-EXPORT_SYMBOL(g_make_token_header);
+EXPORT_SYMBOL_GPL(g_make_token_header);
/*
* Given a buffer containing a token, reads and verifies the token,
@@ -231,5 +231,5 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
return(ret);
}
-EXPORT_SYMBOL(g_verify_token_header);
+EXPORT_SYMBOL_GPL(g_verify_token_header);
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index bce9d527af08..6efbb0cd3c7c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -117,7 +117,7 @@ gss_mech_register(struct gss_api_mech *gm)
return 0;
}
-EXPORT_SYMBOL(gss_mech_register);
+EXPORT_SYMBOL_GPL(gss_mech_register);
void
gss_mech_unregister(struct gss_api_mech *gm)
@@ -129,7 +129,7 @@ gss_mech_unregister(struct gss_api_mech *gm)
gss_mech_free(gm);
}
-EXPORT_SYMBOL(gss_mech_unregister);
+EXPORT_SYMBOL_GPL(gss_mech_unregister);
struct gss_api_mech *
gss_mech_get(struct gss_api_mech *gm)
@@ -138,7 +138,7 @@ gss_mech_get(struct gss_api_mech *gm)
return gm;
}
-EXPORT_SYMBOL(gss_mech_get);
+EXPORT_SYMBOL_GPL(gss_mech_get);
struct gss_api_mech *
gss_mech_get_by_name(const char *name)
@@ -158,7 +158,7 @@ gss_mech_get_by_name(const char *name)
}
-EXPORT_SYMBOL(gss_mech_get_by_name);
+EXPORT_SYMBOL_GPL(gss_mech_get_by_name);
static inline int
mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor)
@@ -191,7 +191,7 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
return gm;
}
-EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor);
+EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
u32
gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
@@ -205,7 +205,7 @@ gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
}
return RPC_AUTH_MAXFLAVOR; /* illegal value */
}
-EXPORT_SYMBOL(gss_svc_to_pseudoflavor);
+EXPORT_SYMBOL_GPL(gss_svc_to_pseudoflavor);
u32
gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
@@ -219,7 +219,7 @@ gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
return 0;
}
-EXPORT_SYMBOL(gss_pseudoflavor_to_service);
+EXPORT_SYMBOL_GPL(gss_pseudoflavor_to_service);
char *
gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service)
@@ -233,7 +233,7 @@ gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service)
return NULL;
}
-EXPORT_SYMBOL(gss_service_to_auth_domain_name);
+EXPORT_SYMBOL_GPL(gss_service_to_auth_domain_name);
void
gss_mech_put(struct gss_api_mech * gm)
@@ -242,7 +242,7 @@ gss_mech_put(struct gss_api_mech * gm)
module_put(gm->gm_owner);
}
-EXPORT_SYMBOL(gss_mech_put);
+EXPORT_SYMBOL_GPL(gss_mech_put);
/* The mech could probably be determined from the token instead, but it's just
* as easy for now to pass it in. */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 81ae3d62a0cc..2278a50c6444 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -332,6 +332,7 @@ struct rsc {
struct svc_cred cred;
struct gss_svc_seq_data seqdata;
struct gss_ctx *mechctx;
+ char *client_name;
};
static struct cache_head *rsc_table[RSC_HASHMAX];
@@ -346,6 +347,7 @@ static void rsc_free(struct rsc *rsci)
gss_delete_sec_context(&rsci->mechctx);
if (rsci->cred.cr_group_info)
put_group_info(rsci->cred.cr_group_info);
+ kfree(rsci->client_name);
}
static void rsc_put(struct kref *ref)
@@ -383,6 +385,7 @@ rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
tmp->handle.data = NULL;
new->mechctx = NULL;
new->cred.cr_group_info = NULL;
+ new->client_name = NULL;
}
static void
@@ -397,6 +400,8 @@ update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
spin_lock_init(&new->seqdata.sd_lock);
new->cred = tmp->cred;
tmp->cred.cr_group_info = NULL;
+ new->client_name = tmp->client_name;
+ tmp->client_name = NULL;
}
static struct cache_head *
@@ -486,6 +491,15 @@ static int rsc_parse(struct cache_detail *cd,
status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
if (status)
goto out;
+
+ /* get client name */
+ len = qword_get(&mesg, buf, mlen);
+ if (len > 0) {
+ rsci.client_name = kstrdup(buf, GFP_KERNEL);
+ if (!rsci.client_name)
+ goto out;
+ }
+
}
rsci.h.expiry_time = expiry;
rscp = rsc_update(&rsci, rscp);
@@ -746,7 +760,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
return gd->pseudoflavor;
}
-EXPORT_SYMBOL(svcauth_gss_flavor);
+EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
int
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
@@ -780,7 +794,7 @@ out:
return stat;
}
-EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor);
+EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
static inline int
read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
@@ -913,6 +927,16 @@ struct gss_svc_data {
struct rsc *rsci;
};
+char *svc_gss_principal(struct svc_rqst *rqstp)
+{
+ struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data;
+
+ if (gd && gd->rsci)
+ return gd->rsci->client_name;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(svc_gss_principal);
+
static int
svcauth_gss_set_client(struct svc_rqst *rqstp)
{
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 3ca518386d15..836f15c0c4a3 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -197,6 +197,12 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
clnt->cl_rtt = &clnt->cl_rtt_default;
rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
+ clnt->cl_principal = NULL;
+ if (args->client_name) {
+ clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
+ if (!clnt->cl_principal)
+ goto out_no_principal;
+ }
kref_init(&clnt->cl_kref);
@@ -226,6 +232,8 @@ out_no_auth:
rpc_put_mount();
}
out_no_path:
+ kfree(clnt->cl_principal);
+out_no_principal:
rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
if (clnt->cl_server != clnt->cl_inline_name)
@@ -354,6 +362,11 @@ rpc_clone_client(struct rpc_clnt *clnt)
new->cl_metrics = rpc_alloc_iostats(clnt);
if (new->cl_metrics == NULL)
goto out_no_stats;
+ if (clnt->cl_principal) {
+ new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
+ if (new->cl_principal == NULL)
+ goto out_no_principal;
+ }
kref_init(&new->cl_kref);
err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
if (err != 0)
@@ -366,6 +379,8 @@ rpc_clone_client(struct rpc_clnt *clnt)
rpciod_up();
return new;
out_no_path:
+ kfree(new->cl_principal);
+out_no_principal:
rpc_free_iostats(new->cl_metrics);
out_no_stats:
kfree(new);
@@ -417,6 +432,7 @@ rpc_free_client(struct kref *kref)
out_free:
rpc_unregister_client(clnt);
rpc_free_iostats(clnt->cl_metrics);
+ kfree(clnt->cl_principal);
clnt->cl_metrics = NULL;
xprt_put(clnt->cl_xprt);
rpciod_down();
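
The clnt.c change duplicates the optional client_name with kstrdup() and frees the copy on every later error path through the out_no_principal label. A userspace sketch of the same allocate-then-unwind shape; the struct, fields, and helper names are invented:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct client {
    char *principal;    /* optional, may stay NULL */
    void *stats;
};

static struct client *new_client(const char *principal)
{
    struct client *clnt = calloc(1, sizeof(*clnt));

    if (!clnt)
        return NULL;
    if (principal) {
        clnt->principal = strdup(principal);  /* like kstrdup() */
        if (!clnt->principal)
            goto out_no_principal;
    }
    clnt->stats = malloc(64);                 /* a later allocation that may fail */
    if (!clnt->stats)
        goto out_no_stats;
    return clnt;

out_no_stats:
    free(clnt->principal);   /* safe even if principal was NULL */
out_no_principal:
    free(clnt);
    return NULL;
}

int main(void)
{
    struct client *c = new_client("host/client.example.com");

    if (!c)
        return 1;
    printf("principal: %s\n", c->principal);
    free(c->stats);
    free(c->principal);
    free(c);
    return 0;
}
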
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 23a2b8f6dc49..192453248870 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -113,7 +113,7 @@ out:
wake_up(&rpci->waitq);
return res;
}
-EXPORT_SYMBOL(rpc_queue_upcall);
+EXPORT_SYMBOL_GPL(rpc_queue_upcall);
static inline void
rpc_inode_setowner(struct inode *inode, void *private)
@@ -126,13 +126,14 @@ rpc_close_pipes(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
struct rpc_pipe_ops *ops;
+ int need_release;
mutex_lock(&inode->i_mutex);
ops = rpci->ops;
if (ops != NULL) {
LIST_HEAD(free_list);
-
spin_lock(&inode->i_lock);
+ need_release = rpci->nreaders != 0 || rpci->nwriters != 0;
rpci->nreaders = 0;
list_splice_init(&rpci->in_upcall, &free_list);
list_splice_init(&rpci->pipe, &free_list);
@@ -141,7 +142,7 @@ rpc_close_pipes(struct inode *inode)
spin_unlock(&inode->i_lock);
rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
rpci->nwriters = 0;
- if (ops->release_pipe)
+ if (need_release && ops->release_pipe)
ops->release_pipe(inode);
cancel_delayed_work_sync(&rpci->queue_timeout);
}
@@ -169,16 +170,24 @@ static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
struct rpc_inode *rpci = RPC_I(inode);
+ int first_open;
int res = -ENXIO;
mutex_lock(&inode->i_mutex);
- if (rpci->ops != NULL) {
- if (filp->f_mode & FMODE_READ)
- rpci->nreaders ++;
- if (filp->f_mode & FMODE_WRITE)
- rpci->nwriters ++;
- res = 0;
+ if (rpci->ops == NULL)
+ goto out;
+ first_open = rpci->nreaders == 0 && rpci->nwriters == 0;
+ if (first_open && rpci->ops->open_pipe) {
+ res = rpci->ops->open_pipe(inode);
+ if (res)
+ goto out;
}
+ if (filp->f_mode & FMODE_READ)
+ rpci->nreaders++;
+ if (filp->f_mode & FMODE_WRITE)
+ rpci->nwriters++;
+ res = 0;
+out:
mutex_unlock(&inode->i_mutex);
return res;
}
@@ -188,6 +197,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
{
struct rpc_inode *rpci = RPC_I(inode);
struct rpc_pipe_msg *msg;
+ int last_close;
mutex_lock(&inode->i_mutex);
if (rpci->ops == NULL)
@@ -214,7 +224,8 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
rpci->ops->destroy_msg, -EAGAIN);
}
}
- if (rpci->ops->release_pipe)
+ last_close = rpci->nwriters == 0 && rpci->nreaders == 0;
+ if (last_close && rpci->ops->release_pipe)
rpci->ops->release_pipe(inode);
out:
mutex_unlock(&inode->i_mutex);
@@ -396,6 +407,7 @@ enum {
RPCAUTH_nfs,
RPCAUTH_portmap,
RPCAUTH_statd,
+ RPCAUTH_nfsd4_cb,
RPCAUTH_RootEOF
};
@@ -429,6 +441,10 @@ static struct rpc_filelist files[] = {
.name = "statd",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
+ [RPCAUTH_nfsd4_cb] = {
+ .name = "nfsd4_cb",
+ .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ },
};
enum {
@@ -748,7 +764,7 @@ rpc_rmdir(struct dentry *dentry)
* @name: name of pipe
* @private: private data to associate with the pipe, for the caller's use
* @ops: operations defining the behavior of the pipe: upcall, downcall,
- * release_pipe, and destroy_msg.
+ * release_pipe, open_pipe, and destroy_msg.
* @flags: rpc_inode flags
*
* Data is made available for userspace to read by calls to
@@ -808,7 +824,7 @@ err_dput:
-ENOMEM);
goto out;
}
-EXPORT_SYMBOL(rpc_mkpipe);
+EXPORT_SYMBOL_GPL(rpc_mkpipe);
/**
* rpc_unlink - remove a pipe
@@ -839,7 +855,7 @@ rpc_unlink(struct dentry *dentry)
dput(parent);
return error;
}
-EXPORT_SYMBOL(rpc_unlink);
+EXPORT_SYMBOL_GPL(rpc_unlink);
/*
* populate the filesystem
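
rpc_pipe.c now calls ops->open_pipe only on the transition from zero to one opener and ops->release_pipe only when the last reader and writer are gone. A minimal userspace sketch of that first-open/last-close accounting; names are invented, and the locking the real code relies on (inode->i_mutex) is omitted:

#include <stdio.h>

static int nreaders, nwriters;

static int on_first_open(void)  { printf("first open\n"); return 0; }
static void on_last_close(void) { printf("last close\n"); }

static int pipe_open(int want_read, int want_write)
{
    int first_open = (nreaders == 0 && nwriters == 0);

    if (first_open) {
        int res = on_first_open();
        if (res)
            return res;
    }
    if (want_read)
        nreaders++;
    if (want_write)
        nwriters++;
    return 0;
}

static void pipe_close(int had_read, int had_write)
{
    if (had_read)
        nreaders--;
    if (had_write)
        nwriters--;
    if (nreaders == 0 && nwriters == 0)
        on_last_close();
}

int main(void)
{
    pipe_open(1, 0);    /* "first open"        */
    pipe_open(0, 1);    /* no hook: not first  */
    pipe_close(1, 0);   /* no hook: not last   */
    pipe_close(0, 1);   /* "last close"        */
    return 0;
}
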
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 79a55d56cc98..406e26de584e 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -28,7 +28,7 @@ xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
memcpy(p, obj->data, obj->len);
return p + XDR_QUADLEN(obj->len);
}
-EXPORT_SYMBOL(xdr_encode_netobj);
+EXPORT_SYMBOL_GPL(xdr_encode_netobj);
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
@@ -41,7 +41,7 @@ xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
obj->data = (u8 *) p;
return p + XDR_QUADLEN(len);
}
-EXPORT_SYMBOL(xdr_decode_netobj);
+EXPORT_SYMBOL_GPL(xdr_decode_netobj);
/**
* xdr_encode_opaque_fixed - Encode fixed length opaque data
@@ -71,7 +71,7 @@ __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
}
return p;
}
-EXPORT_SYMBOL(xdr_encode_opaque_fixed);
+EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
/**
* xdr_encode_opaque - Encode variable length opaque data
@@ -86,14 +86,14 @@ __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
*p++ = htonl(nbytes);
return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
-EXPORT_SYMBOL(xdr_encode_opaque);
+EXPORT_SYMBOL_GPL(xdr_encode_opaque);
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
return xdr_encode_array(p, string, strlen(string));
}
-EXPORT_SYMBOL(xdr_encode_string);
+EXPORT_SYMBOL_GPL(xdr_encode_string);
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
@@ -108,7 +108,7 @@ xdr_decode_string_inplace(__be32 *p, char **sp,
*sp = (char *) p;
return p + XDR_QUADLEN(len);
}
-EXPORT_SYMBOL(xdr_decode_string_inplace);
+EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
@@ -136,7 +136,7 @@ xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
xdr->buflen += len;
xdr->len += len;
}
-EXPORT_SYMBOL(xdr_encode_pages);
+EXPORT_SYMBOL_GPL(xdr_encode_pages);
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
@@ -158,7 +158,7 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
xdr->buflen += len;
}
-EXPORT_SYMBOL(xdr_inline_pages);
+EXPORT_SYMBOL_GPL(xdr_inline_pages);
/*
* Helper routines for doing 'memmove' like operations on a struct xdr_buf
@@ -428,7 +428,7 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
xdr_shrink_bufhead(buf, len);
}
-EXPORT_SYMBOL(xdr_shift_buf);
+EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
* xdr_init_encode - Initialize a struct xdr_stream for sending data.
@@ -465,7 +465,7 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
iov->iov_len += len;
}
}
-EXPORT_SYMBOL(xdr_init_encode);
+EXPORT_SYMBOL_GPL(xdr_init_encode);
/**
* xdr_reserve_space - Reserve buffer space for sending
@@ -492,7 +492,7 @@ __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
xdr->buf->len += nbytes;
return p;
}
-EXPORT_SYMBOL(xdr_reserve_space);
+EXPORT_SYMBOL_GPL(xdr_reserve_space);
/**
* xdr_write_pages - Insert a list of pages into an XDR buffer for sending
@@ -527,7 +527,7 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
buf->buflen += len;
buf->len += len;
}
-EXPORT_SYMBOL(xdr_write_pages);
+EXPORT_SYMBOL_GPL(xdr_write_pages);
/**
* xdr_init_decode - Initialize an xdr_stream for decoding data.
@@ -547,7 +547,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
xdr->p = p;
xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
-EXPORT_SYMBOL(xdr_init_decode);
+EXPORT_SYMBOL_GPL(xdr_init_decode);
/**
* xdr_inline_decode - Retrieve non-page XDR data to decode
@@ -569,7 +569,7 @@ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
xdr->p = q;
return p;
}
-EXPORT_SYMBOL(xdr_inline_decode);
+EXPORT_SYMBOL_GPL(xdr_inline_decode);
/**
* xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
@@ -613,7 +613,7 @@ void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
xdr->p = (__be32 *)((char *)iov->iov_base + padding);
xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
-EXPORT_SYMBOL(xdr_read_pages);
+EXPORT_SYMBOL_GPL(xdr_read_pages);
/**
* xdr_enter_page - decode data from the XDR page
@@ -638,7 +638,7 @@ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
xdr->end = (__be32 *)((char *)xdr->p + len);
}
-EXPORT_SYMBOL(xdr_enter_page);
+EXPORT_SYMBOL_GPL(xdr_enter_page);
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
@@ -650,7 +650,7 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
buf->page_len = 0;
buf->buflen = buf->len = iov->iov_len;
}
-EXPORT_SYMBOL(xdr_buf_from_iov);
+EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
/* Sets subbuf to the portion of buf of length len beginning base bytes
* from the start of buf. Returns -1 if base of length are out of bounds. */
@@ -699,7 +699,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
return -1;
return 0;
}
-EXPORT_SYMBOL(xdr_buf_subsegment);
+EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
@@ -730,7 +730,7 @@ int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, u
__read_bytes_from_xdr_buf(&subbuf, obj, len);
return 0;
}
-EXPORT_SYMBOL(read_bytes_from_xdr_buf);
+EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
@@ -774,7 +774,7 @@ xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
*obj = ntohl(raw);
return 0;
}
-EXPORT_SYMBOL(xdr_decode_word);
+EXPORT_SYMBOL_GPL(xdr_decode_word);
int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
@@ -783,7 +783,7 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
-EXPORT_SYMBOL(xdr_encode_word);
+EXPORT_SYMBOL_GPL(xdr_encode_word);
/* If the netobj starting offset bytes from the start of xdr_buf is contained
* entirely in the head or the tail, set object to point to it; otherwise
@@ -821,7 +821,7 @@ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned in
__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
return 0;
}
-EXPORT_SYMBOL(xdr_buf_read_netobj);
+EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
/* Returns 0 on success, or else a negative error code. */
static int
@@ -1027,7 +1027,7 @@ xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
return xdr_xcode_array2(buf, base, desc, 0);
}
-EXPORT_SYMBOL(xdr_decode_array2);
+EXPORT_SYMBOL_GPL(xdr_decode_array2);
int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
@@ -1039,7 +1039,7 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
return xdr_xcode_array2(buf, base, desc, 1);
}
-EXPORT_SYMBOL(xdr_encode_array2);
+EXPORT_SYMBOL_GPL(xdr_encode_array2);
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
@@ -1106,5 +1106,5 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
out:
return ret;
}
-EXPORT_SYMBOL(xdr_process_buf);
+EXPORT_SYMBOL_GPL(xdr_process_buf);
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 284eaef1dbf2..a2adb51849a9 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -44,27 +44,14 @@ static struct snmp_mib xfrm_mib_list[] = {
SNMP_MIB_SENTINEL
};
-static unsigned long
-fold_field(void *mib[], int offt)
-{
- unsigned long res = 0;
- int i;
-
- for_each_possible_cpu(i) {
- res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
- res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
- }
- return res;
-}
-
static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
int i;
for (i=0; xfrm_mib_list[i].name; i++)
seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
- fold_field((void **)net->mib.xfrm_statistics,
- xfrm_mib_list[i].entry));
+ snmp_fold_field((void **)net->mib.xfrm_statistics,
+ xfrm_mib_list[i].entry));
return 0;
}