[TCP]: Introduce tcp_hdrlen() and tcp_optlen()

The ip_hdrlen() buddies, created to reduce the number of skb->h.th-> uses
and to avoid the longer, open-coded equivalents.

Ditched a no-op in bnx2 in the process.

I wonder if we should have a BUG_ON(skb->h.th->doff < 5) in tcp_optlen()...

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit:    ab6a5bb6b2
parent:    88c7664f13
Author:    Arnaldo Carvalho de Melo <acme@redhat.com>
Committer: David S. Miller <davem@davemloft.net>
Date:      2007-03-18 17:43:48 -07:00

14 changed files with 32 additions and 26 deletions
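
Both helpers are just the TCP Data Offset field scaled to bytes: doff counts
32-bit words and the mandatory header is 5 words (20 bytes), so tcp_hdrlen()
is doff * 4 and tcp_optlen() is (doff - 5) * 4. The standalone sketch below is
not part of the patch; it uses a hypothetical struct demo_tcphdr stand-in
rather than the kernel's sk_buff, purely to illustrate that arithmetic.

#include <stdio.h>

/* Minimal stand-in for the TCP header; only the 4-bit Data Offset (doff)
 * field matters here.  It counts 32-bit words, so legal values run from
 * 5 (bare 20-byte header) to 15 (40 bytes of options). */
struct demo_tcphdr {
    unsigned int doff;
};

static unsigned int demo_tcp_hdrlen(const struct demo_tcphdr *th)
{
    return th->doff * 4;            /* whole header, in bytes */
}

static unsigned int demo_tcp_optlen(const struct demo_tcphdr *th)
{
    return (th->doff - 5) * 4;      /* bytes of options past the base header */
}

int main(void)
{
    struct demo_tcphdr th = { .doff = 8 };  /* e.g. 12 bytes of options */

    printf("tcp_hdrlen: %u\n", demo_tcp_hdrlen(&th));   /* prints 32 */
    printf("tcp_optlen: %u\n", demo_tcp_optlen(&th));   /* prints 12 */
    return 0;
}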


@@ -1307,7 +1307,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
         tso->tsopl |= (iph->ihl &
             CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
-        tso->tsopl |= ((skb->h.th->doff << 2) &
+        tso->tsopl |= (tcp_hdrlen(skb) &
             TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
         tso->tsopl |= (skb_shinfo(skb)->gso_size &
             TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
@@ -1369,8 +1369,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter,
     if (tcp_seg) {
         /* TSO/GSO */
-        proto_hdr_len = (skb_transport_offset(skb) +
-            (skb->h.th->doff << 2));
+        proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
         buffer_info->length = proto_hdr_len;
         page = virt_to_page(skb->data);
         offset = (unsigned long)skb->data & ~PAGE_MASK;
@@ -1563,7 +1562,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
     if (mss) {
         if (skb->protocol == htons(ETH_P_IP)) {
             proto_hdr_len = (skb_transport_offset(skb) +
-                (skb->h.th->doff << 2));
+                tcp_hdrlen(skb));
             if (unlikely(proto_hdr_len > len)) {
                 dev_kfree_skb_any(skb);
                 return NETDEV_TX_OK;


@@ -4521,13 +4521,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
             return NETDEV_TX_OK;
         }
-        tcp_opt_len = ((skb->h.th->doff - 5) * 4);
         vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
         tcp_opt_len = 0;
-        if (skb->h.th->doff > 5) {
-            tcp_opt_len = (skb->h.th->doff - 5) << 2;
-        }
+        if (skb->h.th->doff > 5)
+            tcp_opt_len = tcp_optlen(skb);
         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
         iph = ip_hdr(skb);


@@ -2887,7 +2887,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                 return err;
         }
-        hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2));
+        hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
         mss = skb_shinfo(skb)->gso_size;
         if (skb->protocol == htons(ETH_P_IP)) {
             struct iphdr *iph = ip_hdr(skb);
@@ -3292,7 +3292,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
          * points to just header, pull a few bytes of payload from
          * frags into skb->data */
-        hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2));
+        hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
         if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
             switch (adapter->hw.mac_type) {
                 unsigned int pull_size;


@@ -1300,7 +1300,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
     /* copy only eth/ip/tcp headers to immediate data and
      * the rest of skb->data to sg1entry
      */
-    headersize = ETH_HLEN + ip_hdrlen(skb) + (skb->h.th->doff * 4);
+    headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
     skb_data_size = skb->len - skb->data_len;


@@ -1190,7 +1190,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
                 return err;
         }
-        hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2));
+        hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
         mss = skb_shinfo(skb)->gso_size;
         iph = ip_hdr(skb);
         iph->tot_len = 0;


@@ -2054,8 +2054,7 @@ again:
          * send loop that we are still in the
          * header portion of the TSO packet.
          * TSO header must be at most 134 bytes long */
-        cum_len = -(skb_transport_offset(skb) +
-            (skb->h.th->doff << 2));
+        cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
         /* for TSO, pseudo_hdr_offset holds mss.
          * The firmware figures out where to put


@@ -374,8 +374,7 @@ void netxen_tso_check(struct netxen_adapter *adapter,
 {
     if (desc->mss) {
         desc->total_hdr_length = (sizeof(struct ethhdr) +
-            ip_hdrlen(skb) +
-            skb->h.th->doff * 4);
+            ip_hdrlen(skb) + tcp_hdrlen(skb));
         netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
     } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
         if (ip_hdr(skb)->protocol == IPPROTO_TCP) {


@@ -779,7 +779,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
     if (skb_shinfo(skb)->gso_size > 0) {
         no_of_desc++;
-        if ((ip_hdrlen(skb) + skb->h.th->doff * 4 +
+        if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
             sizeof(struct ethhdr)) >
             (sizeof(struct cmd_desc_type0) - 2)) {
             no_of_desc++;


@@ -1392,7 +1392,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
     /* Check for TCP Segmentation Offload */
     mss = skb_shinfo(skb)->gso_size;
     if (mss != 0) {
-        mss += ((skb->h.th->doff - 5) * 4);  /* TCP options */
+        mss += tcp_optlen(skb);  /* TCP options */
         mss += ip_hdrlen(skb) + sizeof(struct tcphdr);
         mss += ETH_HLEN;


@@ -3911,7 +3911,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
     else {
         struct iphdr *iph = ip_hdr(skb);
-        tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+        tcp_opt_len = tcp_optlen(skb);
         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
         iph->check = 0;
@@ -4065,7 +4065,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
             goto out_unlock;
         }
-        tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+        tcp_opt_len = tcp_optlen(skb);
         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
         hdr_len = ip_tcp_len + tcp_opt_len;


@@ -477,13 +477,13 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                 skb_network_header(skb),
                 ip_hdrlen(skb),
                 skb->h.raw,
-                skb->h.th->doff * 4);
+                tcp_hdrlen(skb));
         else
             eddp = qeth_eddp_create_eddp_data(qhdr,
                 skb_network_header(skb),
                 sizeof(struct ipv6hdr),
                 skb->h.raw,
-                skb->h.th->doff * 4);
+                tcp_hdrlen(skb));
         if (eddp == NULL) {
             QETH_DBF_TEXT(trace, 2, "eddpfcnm");
@@ -596,11 +596,11 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
         ctx = qeth_eddp_create_context_generic(card, skb,
             (sizeof(struct qeth_hdr) +
              ip_hdrlen(skb) +
-             skb->h.th->doff * 4));
+             tcp_hdrlen(skb)));
     else if (skb->protocol == htons(ETH_P_IPV6))
         ctx = qeth_eddp_create_context_generic(card, skb,
             sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
-            skb->h.th->doff*4);
+            tcp_hdrlen(skb));
     else
         QETH_DBF_TEXT(trace, 2, "cetcpinv");


@@ -178,6 +178,16 @@ struct tcp_md5sig {
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>

+static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
+{
+    return skb->h.th->doff * 4;
+}
+
+static inline unsigned int tcp_optlen(const struct sk_buff *skb)
+{
+    return (skb->h.th->doff - 5) * 4;
+}
+
 /* This defines a selective acknowledgement block. */
 struct tcp_sack_block_wire {
     __be32 start_seq;
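
On the BUG_ON(skb->h.th->doff < 5) question in the changelog: a data offset
below 5 means a header shorter than the 20-byte minimum, and (doff - 5) * 4
then wraps around to a huge unsigned value. A minimal userspace sketch of such
a guard, with assert() standing in for BUG_ON and a hypothetical
checked_tcp_optlen() helper (illustration only, not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Option length derived from a raw Data Offset value, refusing a
 * malformed header instead of silently wrapping around. */
static unsigned int checked_tcp_optlen(unsigned int doff)
{
    assert(doff >= 5);              /* 5 words == bare 20-byte header */
    return (doff - 5) * 4;
}

int main(void)
{
    printf("%u\n", checked_tcp_optlen(5));  /* 0: no options */
    printf("%u\n", checked_tcp_optlen(8));  /* 12 bytes of options */
    /* checked_tcp_optlen(3) would trip the assertion. */
    return 0;
}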


@@ -1564,7 +1564,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
         return 0;
     }
-    if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
+    if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
         goto csum_err;
     if (sk->sk_state == TCP_LISTEN) {


@@ -1609,7 +1609,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
         return 0;
     }
-    if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
+    if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
         goto csum_err;
     if (sk->sk_state == TCP_LISTEN) {