[INET]: Just rename the TCP hashtable functions/structs to inet_
This is to break down the complexity of the series of patches,
making it very clear that this one just does:

1. Renames tcp_-prefixed hashtable functions and data structures that
   were already mostly generic to inet_, to share them with DCCP and
   other INET transport protocols.

2. Removes unused functions (__tb_head & tb_head).

3. Removes some leftover prototypes in the headers (tcp_bucket_unlock &
   tcp_v4_build_header).

The next changesets will move tcp_sk(sk)->bind_hash to inet_sock so that we can
make functions such as tcp_inherit_port, __tcp_inherit_port, tcp_v4_get_port and
__tcp_put_port generic, and get others like tcp_destroy_sock closer to generic
(tcp_orphan_count will go to sk->sk_prot to allow this).

Eventually most of these functions will be passed the transport protocol's
inet_hashinfo structure.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Arnaldo Carvalho de Melo authored and David S. Miller committed Aug 29, 2005
1 parent 304a161 commit 0f7ff92
Showing 7 changed files with 139 additions and 139 deletions.
include/linux/tcp.h: 1 addition & 1 deletion
@@ -258,7 +258,7 @@ struct tcp_sock {
__u32 snd_sml; /* Last byte of the most recently transmitted small packet */
__u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
__u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
-struct tcp_bind_bucket *bind_hash;
+struct inet_bind_bucket *bind_hash;
/* Delayed ACK control data */
struct {
__u8 pending; /* ACK is pending */
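To see what the renamed type ties together: a bound socket reaches its local-port bucket through tp->bind_hash, and the bucket's owners list links every socket sharing that port. A minimal sketch, assuming only the struct layouts in this diff (the helper name is hypothetical, not part of the patch):

/* Hypothetical helper: report the local port a TCP socket is bound to,
 * or 0 if it holds no bind bucket.
 */
static inline unsigned short bound_port(const struct tcp_sock *tp)
{
	return tp->bind_hash ? tp->bind_hash->port : 0;
}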
include/net/tcp.h: 42 additions & 51 deletions
@@ -44,13 +44,13 @@
* New scheme, half the table is for TIME_WAIT, the other half is
* for the rest. I'll experiment with dynamic table growth later.
*/
-struct tcp_ehash_bucket {
+struct inet_ehash_bucket {
rwlock_t lock;
struct hlist_head chain;
} __attribute__((__aligned__(8)));

/* This is for listening sockets, thus all sockets which possess wildcards. */
-#define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
+#define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */

/* There are a few simple rules, which allow for local port reuse by
* an application. In essence:
@@ -83,31 +83,22 @@ struct tcp_ehash_bucket {
* users logged onto your box, isn't it nice to know that new data
* ports are created in O(1) time? I thought so. ;-) -DaveM
*/
-struct tcp_bind_bucket {
+struct inet_bind_bucket {
unsigned short port;
signed short fastreuse;
struct hlist_node node;
struct hlist_head owners;
};

-#define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)
+#define inet_bind_bucket_for_each(tb, node, head) \
+	hlist_for_each_entry(tb, node, head, node)

-struct tcp_bind_hashbucket {
+struct inet_bind_hashbucket {
spinlock_t lock;
struct hlist_head chain;
};

-static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
-{
-	return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
-}
-
-static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
-{
-	return hlist_empty(&head->chain) ? NULL : __tb_head(head);
-}

-extern struct tcp_hashinfo {
+struct inet_hashinfo {
/* This is for sockets with full identity only. Sockets here will
* always be without wildcards and will have the following invariant:
*
@@ -116,58 +107,61 @@ extern struct tcp_hashinfo {
* First half of the table is for sockets not in TIME_WAIT, second half
* is for TIME_WAIT sockets only.
*/
-struct tcp_ehash_bucket *__tcp_ehash;
+struct inet_ehash_bucket *ehash;

/* Ok, let's try this, I give up, we do need a local binding
* TCP hash as well as the others for fast bind/connect.
*/
-struct tcp_bind_hashbucket *__tcp_bhash;
+struct inet_bind_hashbucket *bhash;

-int __tcp_bhash_size;
-int __tcp_ehash_size;
+int bhash_size;
+int ehash_size;

/* All sockets in TCP_LISTEN state will be in here. This is the only
* table where wildcard'd TCP sockets can exist. Hash function here
* is just local port number.
*/
-struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];
+struct hlist_head listening_hash[INET_LHTABLE_SIZE];

/* All the above members are written once at bootup and
* never written again _or_ are predominantly read-access.
*
* Now align to a new cache line as all the following members
* are often dirty.
*/
-rwlock_t __tcp_lhash_lock ____cacheline_aligned;
-atomic_t __tcp_lhash_users;
-wait_queue_head_t __tcp_lhash_wait;
-spinlock_t __tcp_portalloc_lock;
-} tcp_hashinfo;
-
-#define tcp_ehash (tcp_hashinfo.__tcp_ehash)
-#define tcp_bhash (tcp_hashinfo.__tcp_bhash)
-#define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
-#define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
-#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
-#define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
-#define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
-#define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
-#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
+rwlock_t lhash_lock ____cacheline_aligned;
+atomic_t lhash_users;
+wait_queue_head_t lhash_wait;
+spinlock_t portalloc_lock;
+};
+
+extern struct inet_hashinfo tcp_hashinfo;
+#define tcp_ehash (tcp_hashinfo.ehash)
+#define tcp_bhash (tcp_hashinfo.bhash)
+#define tcp_ehash_size (tcp_hashinfo.ehash_size)
+#define tcp_bhash_size (tcp_hashinfo.bhash_size)
+#define tcp_listening_hash (tcp_hashinfo.listening_hash)
+#define tcp_lhash_lock (tcp_hashinfo.lhash_lock)
+#define tcp_lhash_users (tcp_hashinfo.lhash_users)
+#define tcp_lhash_wait (tcp_hashinfo.lhash_wait)
+#define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock)

extern kmem_cache_t *tcp_bucket_cachep;
-extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
-						 unsigned short snum);
-extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
-extern void tcp_bucket_unlock(struct sock *sk);
+extern struct inet_bind_bucket *
+inet_bind_bucket_create(kmem_cache_t *cachep,
+			struct inet_bind_hashbucket *head,
+			const unsigned short snum);
+extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
+				     struct inet_bind_bucket *tb);
extern int tcp_port_rover;

/* These are AF independent. */
-static __inline__ int tcp_bhashfn(__u16 lport)
+static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
-return (lport & (tcp_bhash_size - 1));
+return lport & (bhash_size - 1);
}

-extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
+extern void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
unsigned short snum);

#if (BITS_PER_LONG == 64)
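The re-parameterized hash function above no longer reads the global tcp_bhash_size, which is what will let DCCP pass its own table size later. For illustration, with bhash_size == 512 the mask is 511, so local port 8080 hashes to chain 8080 & 511 == 400. Below, a sketch of a chain walk with the renamed macro, modeled on the tcp_v4_get_port idiom; the helper is hypothetical and the caller is assumed to hold head->lock:

/* Hypothetical lookup: find an existing bind bucket for local port snum.
 * Caller must hold the chain's head->lock.
 */
static struct inet_bind_bucket *find_bind_bucket(unsigned short snum)
{
	struct inet_bind_hashbucket *head =
		&tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
	struct inet_bind_bucket *tb;
	struct hlist_node *node;

	inet_bind_bucket_for_each(tb, node, &head->chain)
		if (tb->port == snum)
			return tb;	/* port already has a bucket */
	return NULL;
}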
@@ -212,7 +206,7 @@ struct tcp_tw_bucket {
__u32 tw_ts_recent;
long tw_ts_recent_stamp;
unsigned long tw_ttd;
-struct tcp_bind_bucket *tw_tb;
+struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr tw_v6_daddr;
@@ -366,14 +360,14 @@ extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))

/* These can have wildcards, don't try too hard. */
-static __inline__ int tcp_lhashfn(unsigned short num)
+static inline int inet_lhashfn(const unsigned short num)
{
-return num & (TCP_LHTABLE_SIZE - 1);
+return num & (INET_LHTABLE_SIZE - 1);
}

-static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
+static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
-return tcp_lhashfn(inet_sk(sk)->num);
+return inet_lhashfn(inet_sk(sk)->num);
}

#define MAX_TCP_HEADER (128 + MAX_HEADER)
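inet_lhashfn keeps the listening-hash scheme trivial: local port masked by the table size. Illustrative arithmetic only, not code from this patch: with INET_LHTABLE_SIZE == 32 the mask is 31, so a socket listening on port 80 lands in chain 80 & 31 == 16.

/* Illustrative fragment: the chain for a socket listening on port 80.
 * Readers of this table take tcp_lhash_lock.
 */
struct hlist_head *chain = &tcp_listening_hash[inet_lhashfn(80)];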
@@ -799,9 +793,6 @@ extern void tcp_parse_options(struct sk_buff *skb,
* TCP v4 functions exported for the inet6 API
*/

-extern int tcp_v4_build_header(struct sock *sk,
-			       struct sk_buff *skb);
-
extern void tcp_v4_send_check(struct sock *sk,
struct tcphdr *th, int len,
struct sk_buff *skb);
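Note the structural change hiding in these hunks: struct inet_hashinfo is now a standalone type, with tcp_hashinfo declared as an ordinary extern instance and the compatibility macros reaching through plain member names instead of __tcp_-mangled ones. That decoupling is what allows another INET transport to own its own table set. A hedged sketch of where the series is headed (DCCP's instance only arrives in later patches; the declaration below is illustrative, not from this commit):

/* Illustrative future per-protocol instance, mirroring tcp_hashinfo: */
struct inet_hashinfo dccp_hashinfo;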
net/ipv4/tcp.c: 9 additions & 6 deletions
@@ -272,6 +272,9 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);

kmem_cache_t *tcp_bucket_cachep;
+
+EXPORT_SYMBOL_GPL(tcp_bucket_cachep);
+
kmem_cache_t *tcp_timewait_cachep;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);
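tcp_bucket_cachep picks up an EXPORT_SYMBOL_GPL here because the bucket constructor and destructor now take the kmem cache as an explicit argument; once callers outside net/ipv4/tcp.c (ultimately DCCP) allocate buckets, they need the symbol. A sketch under that assumption; the wrapper is hypothetical:

/* Hypothetical out-of-file caller: allocate a bind bucket for snum from
 * TCP's cache, with the chain's head->lock held.
 */
static struct inet_bind_bucket *alloc_tcp_bind_bucket(struct inet_bind_hashbucket *head,
						      unsigned short snum)
{
	return inet_bind_bucket_create(tcp_bucket_cachep, head, snum);
}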
@@ -2259,7 +2262,7 @@ void __init tcp_init(void)
sizeof(skb->cb));

tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
-sizeof(struct tcp_bind_bucket),
+sizeof(struct inet_bind_bucket),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!tcp_bucket_cachep)
@@ -2277,9 +2280,9 @@
*
* The methodology is similar to that of the buffer cache.
*/
-tcp_ehash = (struct tcp_ehash_bucket *)
+tcp_ehash =
alloc_large_system_hash("TCP established",
-sizeof(struct tcp_ehash_bucket),
+sizeof(struct inet_ehash_bucket),
thash_entries,
(num_physpages >= 128 * 1024) ?
(25 - PAGE_SHIFT) :
@@ -2294,9 +2297,9 @@
INIT_HLIST_HEAD(&tcp_ehash[i].chain);
}

-tcp_bhash = (struct tcp_bind_hashbucket *)
+tcp_bhash =
alloc_large_system_hash("TCP bind",
-sizeof(struct tcp_bind_hashbucket),
+sizeof(struct inet_bind_hashbucket),
tcp_ehash_size,
(num_physpages >= 128 * 1024) ?
(25 - PAGE_SHIFT) :
@@ -2315,7 +2318,7 @@
* on available memory.
*/
for (order = 0; ((1 << order) << PAGE_SHIFT) <
-(tcp_bhash_size * sizeof(struct tcp_bind_hashbucket));
+(tcp_bhash_size * sizeof(struct inet_bind_hashbucket));
order++)
;
if (order >= 4) {
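For reference, the order loop in the last hunk derives a page-order size class from the bind table's footprint, which the code after it (partially elided here) uses to scale defaults such as the local port range. Worked through with illustrative numbers: on 4 KiB pages (PAGE_SHIFT == 12), tcp_bhash_size == 65536 and a 16-byte inet_bind_hashbucket give a 1 MiB table, and the loop stops at order == 8 because (1 << 8) << 12 == 1048576.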
net/ipv4/tcp_diag.c: 2 additions & 2 deletions
@@ -590,7 +590,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!(r->tcpdiag_states&(TCPF_LISTEN|TCPF_SYN_RECV)))
goto skip_listen_ht;
tcp_listen_lock();
-for (i = s_i; i < TCP_LHTABLE_SIZE; i++) {
+for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
struct sock *sk;
struct hlist_node *node;

@@ -646,7 +646,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;

for (i = s_i; i < tcp_ehash_size; i++) {
-struct tcp_ehash_bucket *head = &tcp_ehash[i];
+struct inet_ehash_bucket *head = &tcp_ehash[i];
struct sock *sk;
struct hlist_node *node;

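The established-table walk in tcpdiag_dump now names the renamed bucket type but is otherwise unchanged. A condensed sketch of that walk, with the per-socket filtering reduced to a comment (simplified, not verbatim from this file):

int i;

for (i = 0; i < tcp_ehash_size; i++) {
	struct inet_ehash_bucket *head = &tcp_ehash[i];
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&head->lock);
	sk_for_each(sk, node, &head->chain) {
		/* match r->tcpdiag_states and ports, then emit a record */
	}
	read_unlock_bh(&head->lock);
}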