#ifndef _SUNVNETCOMMON_H
#define _SUNVNETCOMMON_H

#include <linux/interrupt.h>

/* length of time (or less) we expect pending descriptors to be marked
 * as VIO_DESC_DONE and skbs ready to be freed
 */
#define VNET_CLEAN_TIMEOUT		((HZ / 100) + 1)

#define VNET_MAXPACKET			(65535ULL + ETH_HLEN + VLAN_HLEN)
#define VNET_TX_RING_SIZE		512
/* wake a stopped TX queue once a quarter of the ring has drained */
#define VNET_TX_WAKEUP_THRESH(dr)	((dr)->pending / 4)

#define VNET_MINTSO			2048	/* VIO protocol's minimum TSO len */
#define VNET_MAXTSO			65535	/* VIO protocol's maximum TSO len */

/* VNET packets are sent in buffers with the first 6 bytes skipped
 * so that after the ethernet header the IPv4/IPv6 headers are aligned
 * properly.
 */
#define VNET_PACKET_SKIP		6

/* worst-case number of LDC cookies needed to map one max-size packet */
#define VNET_MAXCOOKIES			(VNET_MAXPACKET / PAGE_SIZE + 1)

#define VNET_MAX_TXQS			16
/* Per-slot TX ring bookkeeping: the in-flight skb and the LDC transfer
 * cookies describing its buffer to the transport.
 */
struct vnet_tx_entry {
	struct sk_buff		*skb;
	unsigned int		ncookies;	/* number of valid entries in cookies[] */
	struct ldc_trans_cookie	cookies[VNET_MAXCOOKIES];
};
struct vnet;

/* State for one port (channel) attached to a vnet device.  Embeds the
 * generic VIO driver state plus per-port TX ring, NAPI and timer state.
 */
struct vnet_port {
	struct vio_driver_state	vio;	/* must allow to_vnet_port() container_of */

	struct hlist_node	hash;	/* entry in vnet->port_hash, keyed by vnet_hashfn() */
	u8			raddr[ETH_ALEN];	/* presumably the peer's MAC address - confirm against users */
	unsigned		switch_port:1;
	unsigned		tso:1;	/* NOTE(review): looks like a TSO-capability flag - confirm */
	unsigned		__pad:14;

	struct vnet		*vp;	/* owning device instance */

	struct vnet_tx_entry	tx_bufs[VNET_TX_RING_SIZE];

	struct list_head	list;	/* entry in vnet->port_list */

	u32			stop_rx_idx;
	bool			stop_rx;
	bool			start_cons;

	struct timer_list	clean_timer;	/* armed with VNET_CLEAN_TIMEOUT; see sunvnet_clean_timer_expire_common() */

	u64			rmtu;	/* presumably the remote/negotiated MTU - confirm */
	u16			tsolen;

	struct napi_struct	napi;
	u32			napi_stop_idx;
	bool			napi_resume;
	int			rx_event;
	u16			q_index;	/* TX queue index; see sunvnet_port_add_txq_common() */
};
/* Map an embedded vio_driver_state back to its containing vnet_port. */
static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vnet_port, vio);
}
#define VNET_PORT_HASH_SIZE	16
#define VNET_PORT_HASH_MASK	(VNET_PORT_HASH_SIZE - 1)

/* Hash a MAC address into a vnet->port_hash bucket index.
 *
 * Only the two least-significant octets are mixed; on a single fabric
 * the leading (OUI) octets are typically identical across ports, so
 * the low bytes carry the entropy.  Result is < VNET_PORT_HASH_SIZE.
 *
 * The address is only read, so the parameter is const-qualified;
 * callers passing a non-const u8 * are unaffected.
 */
static inline unsigned int vnet_hashfn(const u8 *mac)
{
	return (mac[4] ^ mac[5]) & VNET_PORT_HASH_MASK;
}
/* Node in the singly-linked multicast address list (vnet->mcast_list). */
struct vnet_mcast_entry {
	u8			addr[ETH_ALEN];
	u8			sent;	/* NOTE(review): looks like an already-sent-to-peer flag - confirm */
	u8			hit;	/* NOTE(review): looks like a still-wanted marker used during rescan - confirm */
	struct vnet_mcast_entry	*next;
};
/* One virtual-network device instance: the net_device plus the set of
 * ports that back it.
 */
struct vnet {
	/* Protects port_list and port_hash. */
	spinlock_t		lock;

	struct net_device	*dev;

	u32			msg_enable;	/* presumably the netif_msg_* level bitmap - confirm */

	struct list_head	port_list;	/* all attached vnet_port::list entries */
	struct hlist_head	port_hash[VNET_PORT_HASH_SIZE];	/* buckets keyed by vnet_hashfn() */
	struct vnet_mcast_entry	*mcast_list;	/* singly-linked multicast address list */

	struct list_head	list;	/* entry in a driver-global list - see users */
	u64			local_mac;

	int			nports;	/* number of ports currently attached */
};
/* Common funcs - shared entry points implemented in the companion
 * sunvnet common .c file and used by the sunvnet front-end drivers.
 */

/* Timer callback armed with VNET_CLEAN_TIMEOUT; port0 is presumably a
 * struct vnet_port * cast through unsigned long for the old timer API -
 * confirm against the implementation.
 */
void sunvnet_clean_timer_expire_common(unsigned long port0);

/* net_device_ops-shaped helpers */
int sunvnet_open_common(struct net_device *dev);
int sunvnet_close_common(struct net_device *dev);
void sunvnet_set_rx_mode_common(struct net_device *dev);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
void sunvnet_tx_timeout_common(struct net_device *dev);
int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu);
int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev);
u16 sunvnet_select_queue_common(struct net_device *dev,
				struct sk_buff *skb,
				void *accel_priv,
				select_queue_fallback_t fallback);
#ifdef CONFIG_NET_POLL_CONTROLLER
void sunvnet_poll_controller_common(struct net_device *dev);
#endif

/* VIO/LDC event and handshake handlers */
void sunvnet_event_common(void *arg, int event);
int sunvnet_send_attr_common(struct vio_driver_state *vio);
int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg);
void sunvnet_handshake_complete_common(struct vio_driver_state *vio);

/* NAPI poll routine */
int sunvnet_poll_common(struct napi_struct *napi, int budget);

/* Per-port TX resource setup/teardown */
void sunvnet_port_free_tx_bufs_common(struct vnet_port *port);
void sunvnet_port_add_txq_common(struct vnet_port *port);
void sunvnet_port_rm_txq_common(struct vnet_port *port);
#endif /* _SUNVNETCOMMON_H */