Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--	net/ipv4/route.c | 60
1 file changed, 6 insertions(+), 54 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 9cc00f8a6ee5..95bfa1ba5b28 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -889,7 +889,6 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
- inetpeer_invalidate_family(AF_INET);
}
/*
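With the inet_peer cache no longer invalidated alongside the routing cache, rt_cache_invalidate() reduces to perturbing the per-namespace generation counter. A minimal sketch of the resulting function, assuming the shuffle variable declared above the hunk's visible context is an unsigned char:

	static void rt_cache_invalidate(struct net *net)
	{
		unsigned char shuffle;

		/* bump rt_genid by a random, non-zero amount so every
		 * cached entry with the old generation id is seen as stale */
		get_random_bytes(&shuffle, sizeof(shuffle));
		atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
	}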
@@ -1216,22 +1215,6 @@ skip_hashing:
return rt;
}
-void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
-{
- struct inet_peer_base *base;
- struct inet_peer *peer;
-
- base = inetpeer_base_ptr(rt->_peer);
- if (!base)
- return;
-
- peer = inet_getpeer_v4(base, daddr, create);
- if (peer) {
- if (!rt_set_peer(rt, peer))
- inet_putpeer(peer);
- }
-}
-
/*
* Peer allocation may fail only in serious out-of-memory conditions. However
* we still can generate some output.
@@ -1588,10 +1571,6 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
fib_info_put(rt->fi);
rt->fi = NULL;
}
- if (rt_has_peer(rt)) {
- struct inet_peer *peer = rt_peer_ptr(rt);
- inet_putpeer(peer);
- }
}
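After this hunk the dst destructor only has to release the fib_info reference taken in rt_init_metrics(); the inet_putpeer() call disappears with the peer pointer. A sketch of the whole function as it would then read, assuming the cast to struct rtable and the if (rt->fi) guard sitting just above the visible context are unchanged:

	static void ipv4_dst_destroy(struct dst_entry *dst)
	{
		struct rtable *rt = (struct rtable *) dst;

		if (rt->fi) {
			/* drop the reference taken via fib_clntref in rt_init_metrics() */
			fib_info_put(rt->fi);
			rt->fi = NULL;
		}
	}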
@@ -1711,26 +1690,11 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
struct fib_info *fi)
{
- struct inet_peer_base *base;
- struct inet_peer *peer;
-
- base = inetpeer_base_ptr(rt->_peer);
- BUG_ON(!base);
-
- peer = inet_getpeer_v4(base, rt->rt_dst, 0);
- if (peer) {
- __rt_set_peer(rt, peer);
- if (inet_metrics_new(peer))
- memcpy(peer->metrics, fi->fib_metrics,
- sizeof(u32) * RTAX_MAX);
- dst_init_metrics(&rt->dst, peer->metrics, false);
- } else {
- if (fi->fib_metrics != (u32 *) dst_default_metrics) {
- rt->fi = fi;
- atomic_inc(&fi->fib_clntref);
- }
- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+ rt->fi = fi;
+ atomic_inc(&fi->fib_clntref);
}
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
}
static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
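Metric handling collapses to what used to be the else branch: metrics always come from the fib_info, and the route pins the fib_info (via fib_clntref) only when it carries non-default metrics. The resulting function, assembled from the hunk's context and added lines (the fl4 parameter is left unreferenced in the new body, and the final true marks the metric array as read-only for this dst):

	static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
				    struct fib_info *fi)
	{
		if (fi->fib_metrics != (u32 *) dst_default_metrics) {
			/* non-default metrics: hold the fib_info so the
			 * metric array stays valid for the route's lifetime */
			rt->fi = fi;
			atomic_inc(&fi->fib_clntref);
		}
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
	}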
@@ -1820,7 +1784,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->rt_mark = skb->mark;
rth->rt_pmtu = 0;
rth->rt_gateway = daddr;
- rt_init_peer(rth, dev_net(dev)->ipv4.peers);
rth->fi = NULL;
if (our) {
rth->dst.input= ip_local_deliver;
@@ -1946,7 +1909,6 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_mark = skb->mark;
rth->rt_pmtu = 0;
rth->rt_gateway = daddr;
- rt_init_peer(rth, &res->table->tb_peers);
rth->fi = NULL;
rth->dst.input = ip_forward;
@@ -2125,7 +2087,6 @@ local_input:
rth->rt_mark = skb->mark;
rth->rt_pmtu = 0;
rth->rt_gateway = daddr;
- rt_init_peer(rth, net->ipv4.peers);
rth->fi = NULL;
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
@@ -2323,9 +2284,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
rth->rt_mark = fl4->flowi4_mark;
rth->rt_pmtu = 0;
rth->rt_gateway = fl4->daddr;
- rt_init_peer(rth, (res->table ?
- &res->table->tb_peers :
- dev_net(dev_out)->ipv4.peers));
rth->fi = NULL;
RT_CACHE_STAT_INC(out_slow_tot);
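The four route-construction paths touched above (ip_route_input_mc, __mkroute_input, the local_input label and __mkroute_output) all drop the same thing: the rt_init_peer() call that pointed the freshly allocated rtable at an inet_peer base. The common initialisation tail, sketched from the input-path hunks, is now simply:

	rth->rt_mark = skb->mark;
	rth->rt_pmtu = 0;
	rth->rt_gateway = daddr;
	rth->fi = NULL;

In __mkroute_output the same tail reads the mark and gateway from the flowi4 key (fl4->flowi4_mark, fl4->daddr) instead of the skb, as its hunk shows.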
@@ -2662,7 +2620,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_dst = ort->rt_dst;
rt->rt_src = ort->rt_src;
rt->rt_gateway = ort->rt_gateway;
- rt_transfer_peer(rt, ort);
rt->fi = ort->fi;
if (rt->fi)
atomic_inc(&rt->fi->fib_clntref);
@@ -2700,7 +2657,7 @@ static int rt_fill_info(struct net *net,
struct rtmsg *r;
struct nlmsghdr *nlh;
unsigned long expires = 0;
- u32 id = 0, error;
+ u32 error;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
if (nlh == NULL)
@@ -2753,11 +2710,6 @@ static int rt_fill_info(struct net *net,
goto nla_put_failure;
error = rt->dst.error;
- if (rt_has_peer(rt)) {
- const struct inet_peer *peer = rt_peer_ptr(rt);
- inet_peer_refcheck(peer);
- id = atomic_read(&peer->ip_id_count) & 0xffff;
- }
expires = rt->dst.expires;
if (expires) {
if (time_before(jiffies, expires))
@@ -2792,7 +2744,7 @@ static int rt_fill_info(struct net *net,
goto nla_put_failure;
}
- if (rtnl_put_cacheinfo(skb, &rt->dst, id, expires, error) < 0)
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
goto nla_put_failure;
return nlmsg_end(skb, nlh);
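In rt_fill_info() the RTA_CACHEINFO id used to be sampled from the peer's ip_id_count; with no peer attached to the route there is nothing to report, so the local id variable goes away and rtnl_put_cacheinfo() is handed a literal 0. A sketch of the function's tail after the patch, assuming the attribute-emitting code between the two hunks is untouched and that an already-expired timestamp is clamped to 0 as in the surrounding code:

	error = rt->dst.error;
	expires = rt->dst.expires;
	if (expires) {
		if (time_before(jiffies, expires))
			expires -= jiffies;
		else
			expires = 0;
	}
	/* ... remaining route attributes emitted as before ... */
	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);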