author    | Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 2005-06-18 22:47:59 -0700
committer | David S. Miller <davem@davemloft.net>              | 2005-06-18 22:47:59 -0700
commit    | 0e87506fcc734647c7b2497eee4eb81e785c857a (patch)
tree      | bb8863c59fdef2628f17b6773c52801792a57722 /include/net
parent    | 60236fdd08b2169045a3bbfc5ffe1576e6c3c17b (diff)
download  | linux-0e87506fcc734647c7b2497eee4eb81e785c857a.tar.bz2
[NET] Generalise tcp_listen_opt
This chunks out the accept_queue and tcp_listen_opt code and moves
them to net/core/request_sock.c and include/net/request_sock.h, to
make it useful for other transport protocols, DCCP being the first one
to use it.
Next patches will rename tcp_listen_opt to accept_sock and remove the
inline tcp functions that just call a reqsk_queue_ function.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
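
The core of the change is that the listener's accept queue (a FIFO of established children linked through dl_next, with head and tail pointers) and the SYN hash table move out of struct tcp_sock into the protocol-neutral struct request_sock_queue. The sketch below is a minimal userspace model of just the FIFO part, mirroring the names used by reqsk_queue_add() and reqsk_queue_remove() in the diff; locking, the SYN table and the real kernel sock types are deliberately left out, so treat it as an illustration rather than the kernel code.

```c
/*
 * Simplified userspace model of the accept-queue FIFO factored out by this
 * patch (struct request_sock_queue). It only mirrors the head/tail list
 * handling of reqsk_queue_add()/reqsk_queue_remove(); locking, the SYN
 * hash table and the sock back-pointers are omitted.
 */
#include <assert.h>
#include <stdio.h>

struct request_sock {
	struct request_sock *dl_next;	/* next request in the accept FIFO */
	int id;				/* stand-in for the child socket */
};

struct request_sock_queue {
	struct request_sock *rskq_accept_head;
	struct request_sock *rskq_accept_tail;
};

/* Mirrors reqsk_queue_add(): append an established request at the tail. */
static void reqsk_queue_add(struct request_sock_queue *q, struct request_sock *req)
{
	if (q->rskq_accept_head == NULL)
		q->rskq_accept_head = req;
	else
		q->rskq_accept_tail->dl_next = req;

	q->rskq_accept_tail = req;
	req->dl_next = NULL;
}

/* Mirrors reqsk_queue_remove(): pop the oldest request from the head. */
static struct request_sock *reqsk_queue_remove(struct request_sock_queue *q)
{
	struct request_sock *req = q->rskq_accept_head;

	assert(req != NULL);	/* the kernel code uses BUG_TRAP() here */

	q->rskq_accept_head = req->dl_next;
	if (q->rskq_accept_head == NULL)
		q->rskq_accept_tail = NULL;

	return req;
}

int main(void)
{
	struct request_sock_queue q = { NULL, NULL };
	struct request_sock a = { .id = 1 }, b = { .id = 2 };

	reqsk_queue_add(&q, &a);
	reqsk_queue_add(&q, &b);

	/* accept() order is FIFO: 1 then 2 */
	printf("%d\n", reqsk_queue_remove(&q)->id);
	printf("%d\n", reqsk_queue_remove(&q)->id);
	return 0;
}
```

Keeping both head and tail pointers makes enqueue (on connection establishment) and dequeue (on accept()) O(1), which is what the kernel helpers rely on.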
Diffstat (limited to 'include/net')
-rw-r--r-- | include/net/request_sock.h | 178
-rw-r--r-- | include/net/tcp.h          |  46
2 files changed, 186 insertions, 38 deletions
```diff
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 08a8fd1d1610..38943ed04e73 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -16,7 +16,9 @@
 #define _REQUEST_SOCK_H
 
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/types.h>
+
 #include <net/sock.h>
 
 struct request_sock;
@@ -74,4 +76,180 @@ static inline void reqsk_free(struct request_sock *req)
 	__reqsk_free(req);
 }
 
+extern int sysctl_max_syn_backlog;
+
+/** struct tcp_listen_opt - listen state
+ *
+ * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
+ */
+struct tcp_listen_opt {
+	u8			max_qlen_log;
+	/* 3 bytes hole, try to use */
+	int			qlen;
+	int			qlen_young;
+	int			clock_hand;
+	u32			hash_rnd;
+	struct request_sock	*syn_table[0];
+};
+
+/** struct request_sock_queue - queue of request_socks
+ *
+ * @rskq_accept_head - FIFO head of established children
+ * @rskq_accept_tail - FIFO tail of established children
+ * @syn_wait_lock - serializer
+ *
+ * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
+ * lock sock while browsing the listening hash (otherwise it's deadlock prone).
+ *
+ * This lock is acquired in read mode only from listening_get_next() seq_file
+ * op and it's acquired in write mode _only_ from code that is actively
+ * changing rskq_accept_head. All readers that are holding the master sock lock
+ * don't need to grab this lock in read mode too as rskq_accept_head. writes
+ * are always protected from the main sock lock.
+ */
+struct request_sock_queue {
+	struct request_sock	*rskq_accept_head;
+	struct request_sock	*rskq_accept_tail;
+	rwlock_t		syn_wait_lock;
+	struct tcp_listen_opt	*listen_opt;
+};
+
+extern int reqsk_queue_alloc(struct request_sock_queue *queue,
+			     const int nr_table_entries);
+
+static inline struct tcp_listen_opt *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
+{
+	struct tcp_listen_opt *lopt;
+
+	write_lock_bh(&queue->syn_wait_lock);
+	lopt = queue->listen_opt;
+	queue->listen_opt = NULL;
+	write_unlock_bh(&queue->syn_wait_lock);
+
+	return lopt;
+}
+
+static inline void reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+	kfree(reqsk_queue_yank_listen_sk(queue));
+}
+
+static inline struct request_sock *
+	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
+{
+	struct request_sock *req = queue->rskq_accept_head;
+
+	queue->rskq_accept_head = queue->rskq_accept_head = NULL;
+	return req;
+}
+
+static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+{
+	return queue->rskq_accept_head == NULL;
+}
+
+static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
+				      struct request_sock *req,
+				      struct request_sock **prev_req)
+{
+	write_lock(&queue->syn_wait_lock);
+	*prev_req = req->dl_next;
+	write_unlock(&queue->syn_wait_lock);
+}
+
+static inline void reqsk_queue_add(struct request_sock_queue *queue,
+				   struct request_sock *req,
+				   struct sock *parent,
+				   struct sock *child)
+{
+	req->sk = child;
+	sk_acceptq_added(parent);
+
+	if (queue->rskq_accept_head == NULL)
+		queue->rskq_accept_head = req;
+	else
+		queue->rskq_accept_tail->dl_next = req;
+
+	queue->rskq_accept_tail = req;
+	req->dl_next = NULL;
+}
+
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+{
+	struct request_sock *req = queue->rskq_accept_head;
+
+	BUG_TRAP(req != NULL);
+
+	queue->rskq_accept_head = req->dl_next;
+	if (queue->rskq_accept_head == NULL)
+		queue->rskq_accept_tail = NULL;
+
+	return req;
+}
+
+static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
+						 struct sock *parent)
+{
+	struct request_sock *req = reqsk_queue_remove(queue);
+	struct sock *child = req->sk;
+
+	BUG_TRAP(child != NULL);
+
+	sk_acceptq_removed(parent);
+	__reqsk_free(req);
+	return child;
+}
+
+static inline int reqsk_queue_removed(struct request_sock_queue *queue,
+				      struct request_sock *req)
+{
+	struct tcp_listen_opt *lopt = queue->listen_opt;
+
+	if (req->retrans == 0)
+		--lopt->qlen_young;
+
+	return --lopt->qlen;
+}
+
+static inline int reqsk_queue_added(struct request_sock_queue *queue)
+{
+	struct tcp_listen_opt *lopt = queue->listen_opt;
+	const int prev_qlen = lopt->qlen;
+
+	lopt->qlen_young++;
+	lopt->qlen++;
+	return prev_qlen;
+}
+
+static inline int reqsk_queue_len(struct request_sock_queue *queue)
+{
+	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+}
+
+static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+{
+	return queue->listen_opt->qlen_young;
+}
+
+static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+{
+	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+}
+
+static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
+					u32 hash, struct request_sock *req,
+					unsigned timeout)
+{
+	struct tcp_listen_opt *lopt = queue->listen_opt;
+
+	req->expires = jiffies + timeout;
+	req->retrans = 0;
+	req->sk = NULL;
+	req->dl_next = lopt->syn_table[hash];
+
+	write_lock(&queue->syn_wait_lock);
+	lopt->syn_table[hash] = req;
+	write_unlock(&queue->syn_wait_lock);
+}
+
 #endif /* _REQUEST_SOCK_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6663086a5e35..a2e323c54457 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1686,71 +1686,41 @@ static inline int tcp_full_space(const struct sock *sk)
 static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
 					 struct sock *child)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	req->sk = child;
-	sk_acceptq_added(sk);
-
-	if (!tp->accept_queue_tail) {
-		tp->accept_queue = req;
-	} else {
-		tp->accept_queue_tail->dl_next = req;
-	}
-	tp->accept_queue_tail = req;
-	req->dl_next = NULL;
+	reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
 }
 
-struct tcp_listen_opt
-{
-	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
-	int			qlen;
-	int			qlen_young;
-	int			clock_hand;
-	u32			hash_rnd;
-	struct request_sock	*syn_table[TCP_SYNQ_HSIZE];
-};
-
 static inline void tcp_synq_removed(struct sock *sk, struct request_sock *req)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (--lopt->qlen == 0)
+	if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
 		tcp_delete_keepalive_timer(sk);
-	if (req->retrans == 0)
-		lopt->qlen_young--;
 }
 
 static inline void tcp_synq_added(struct sock *sk)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (lopt->qlen++ == 0)
+	if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
 		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
-	lopt->qlen_young++;
 }
 
 static inline int tcp_synq_len(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen;
+	return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_young(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen_young;
+	return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_is_full(struct sock *sk)
 {
-	return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
+	return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
 }
 
 static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
-				   struct request_sock **prev)
+				       struct request_sock **prev)
 {
-	write_lock(&tp->syn_wait_lock);
-	*prev = req->dl_next;
-	write_unlock(&tp->syn_wait_lock);
+	reqsk_queue_unlink(&tp->accept_queue, req, prev);
 }
 
 static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
```
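
One detail worth calling out from the moved code: the full-queue test that tcp_synq_is_full() used to perform inline survives unchanged as reqsk_queue_is_full(), which returns qlen >> max_qlen_log, i.e. it becomes non-zero exactly when the number of pending requests reaches 2^max_qlen_log. The snippet below is a small userspace demo of that shift-based check; the limit value is made up purely for illustration.

```c
/*
 * Userspace demo of the qlen >> max_qlen_log test used by
 * reqsk_queue_is_full(). The limit (max_qlen_log = 4, i.e. 16 entries)
 * is an arbitrary value chosen for the example.
 */
#include <stdio.h>

static int queue_is_full(int qlen, unsigned char max_qlen_log)
{
	/* non-zero once qlen >= 2^max_qlen_log */
	return qlen >> max_qlen_log;
}

int main(void)
{
	const unsigned char max_qlen_log = 4;	/* queue limit: 2^4 = 16 entries */
	int qlen;

	for (qlen = 14; qlen <= 17; qlen++)
		printf("qlen=%d full=%d\n", qlen, queue_is_full(qlen, max_qlen_log));
	/* prints full=0 for 14 and 15, full=1 for 16 and 17 */
	return 0;
}
```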