android_kernel_google_msm/include/net/ipip.h

#ifndef __NET_IPIP_H
#define __NET_IPIP_H 1

#include <linux/if_tunnel.h>
#include <net/ip.h>

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO	(30*HZ)

/* 6rd prefix/relay information */
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;
	__be32			relay_prefix;
	u16			prefixlen;
	u16			relay_prefixlen;
};
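/*
 * Illustrative example (not part of the original header): with prefix =
 * 2001:db8::/32 (prefixlen = 32) and no common IPv4 prefix
 * (relay_prefixlen = 0), a host whose IPv4 address is 192.0.2.1 derives
 * the 6rd delegated prefix 2001:db8:c000:201::/64 by appending all 32
 * bits of its IPv4 address to the 6rd prefix (see RFC 5969).
 */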
struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct net_device	*dev;

	int			err_count;	/* Number of arrived ICMP errors */
	unsigned long		err_time;	/* Time when the last ICMP error arrived */

	/* These four fields used only by GRE */
	__u32			i_seqno;	/* The last seen seqno */
	__u32			o_seqno;	/* The last output seqno */
	int			hlen;		/* Precalculated GRE header length */
	int			mlink;

	struct ip_tunnel_parm	parms;

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm	ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;		/* potential router list */
	unsigned int			prl_count;	/* # of entries in PRL */
};
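/*
 * Usage sketch (illustrative, not part of the original header): tunnel
 * drivers such as ipip, sit and ip_gre keep a struct ip_tunnel as their
 * netdev private area, so per-tunnel state is reached via netdev_priv():
 *
 *	struct ip_tunnel *t = netdev_priv(dev);
 *
 *	t->parms.iph.ttl = 64;	   set the outer IPv4 TTL via ip_tunnel_parm
 */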
struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				addr;
	u16				flags;
	struct rcu_head			rcu_head;
};
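/*
 * Lookup sketch (illustrative, not part of the original header): the PRL
 * is a singly linked, RCU-protected list, so a reader would typically
 * walk it under rcu_read_lock():
 *
 *	struct ip_tunnel_prl_entry *p;
 *
 *	rcu_read_lock();
 *	for (p = rcu_dereference(tunnel->prl); p;
 *	     p = rcu_dereference(p->next))
 *		if (p->addr == addr)
 *			break;
 *	rcu_read_unlock();
 */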
#define __IPTUNNEL_XMIT(stats1, stats2) do { \
	int err; \
	int pkt_len = skb->len - skb_transport_offset(skb); \
	\
	skb->ip_summed = CHECKSUM_NONE; \
	/* IP ID chosen here per upstream commit 73f156a6e8c1 ("inetpeer: get rid of ip_id_count") */ \
	ip_select_ident(skb, NULL); \
	\
	err = ip_local_out(skb); \
	if (likely(net_xmit_eval(err) == 0)) { \
		u64_stats_update_begin(&(stats1)->syncp); \
		(stats1)->tx_bytes += pkt_len; \
		(stats1)->tx_packets++; \
		u64_stats_update_end(&(stats1)->syncp); \
	} else { \
		(stats2)->tx_errors++; \
		(stats2)->tx_aborted_errors++; \
	} \
} while (0)
#define IPTUNNEL_XMIT() __IPTUNNEL_XMIT(txq, stats)
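/*
 * Usage sketch (illustrative, not taken from this tree): a tunnel driver's
 * ndo_start_xmit path typically ends by handing the encapsulated skb to
 * this macro once the outer IP header has been built:
 *
 *	tstats = this_cpu_ptr(dev->tstats);
 *	__IPTUNNEL_XMIT(tstats, &dev->stats);
 *	return NETDEV_TX_OK;
 *
 * Here tstats points at the driver's per-cpu TX counters (which supply the
 * syncp/tx_bytes/tx_packets fields used above) and dev->stats absorbs the
 * error counters.  IPTUNNEL_XMIT() is the argument-less form that assumes
 * local variables named txq and stats are already in scope.
 */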
#endif