net: rmnet_data: add support for UL MAP based checksum offload

Add UL checksum offload routines for MAPv3. Allows the software checksum
computation to be bypassed for IPv4/IPv6 TCP/UDP packets.
Set rmnet_data VNDs hw_flags to NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM to
define the checksum offload abilities.
Add UL checksum meta-info header for IPv4/IPv6 TCP/UDP packets for which
UL checksum is being offloaded.

CRs-fixed: 731693
Change-Id: Ief139d357b528aead66acfe39a5227328b8fbf93
Signed-off-by: Sivan Reinstein <sivanr@codeaurora.org>
This commit is contained in:
Sivan Reinstein 2014-09-03 15:40:27 +03:00
parent 64ba607520
commit 6b9a08570c
8 changed files with 177 additions and 4 deletions

View File

@ -23,6 +23,7 @@
#define RMNET_EGRESS_FORMAT_MAP (1<<1)
#define RMNET_EGRESS_FORMAT_AGGREGATION (1<<2)
#define RMNET_EGRESS_FORMAT_MUXING (1<<3)
#define RMNET_EGRESS_FORMAT_MAP_CKSUMV3 (1<<4)
#define RMNET_INGRESS_FIX_ETHERNET (1<<0)
#define RMNET_INGRESS_FORMAT_MAP (1<<1)

View File

@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/rmnet_data.h>
#include <linux/net_map.h>
#include <linux/netdev_features.h>
#include "rmnet_data_private.h"
#include "rmnet_data_config.h"
#include "rmnet_data_vnd.h"
@ -351,6 +352,7 @@ static rx_handler_result_t rmnet_map_ingress_handler(struct sk_buff *skb,
* @config: Physical endpoint configuration for the egress device
* @ep: logical endpoint configuration of the packet originator
* (e.g. RmNet virtual network device)
* @orig_dev: The originator vnd device
*
* Called if and only if MAP is configured in the egress device's egress data
* format. Will expand skb if there is insufficient headroom for MAP protocol.
@ -362,14 +364,21 @@ static rx_handler_result_t rmnet_map_ingress_handler(struct sk_buff *skb,
*/
static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_phys_ep_conf_s *config,
struct rmnet_logical_ep_conf_s *ep)
struct rmnet_logical_ep_conf_s *ep,
struct net_device *orig_dev)
{
int required_headroom, additional_header_length;
int required_headroom, additional_header_length, ckresult;
struct rmnet_map_header_s *map_header;
additional_header_length = 0;
required_headroom = sizeof(struct rmnet_map_header_s);
if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) {
required_headroom +=
sizeof(struct rmnet_map_ul_checksum_header_s);
additional_header_length +=
sizeof(struct rmnet_map_ul_checksum_header_s);
}
LOGD("headroom of %d bytes", required_headroom);
@ -381,6 +390,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
}
}
if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) {
ckresult = rmnet_map_checksum_uplink_packet(skb, orig_dev);
trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult);
rmnet_stats_ul_checksum(ckresult);
}
map_header = rmnet_map_add_map_header(skb, additional_header_length);
if (!map_header) {
@ -544,7 +559,7 @@ void rmnet_egress_handler(struct sk_buff *skb,
skb->dev->name, config->egress_data_format);
if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
switch (rmnet_map_egress_handler(skb, config, ep)) {
switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
case RMNET_MAP_CONSUMED:
LOGD("%s", "MAP process consumed packet");
return;

View File

@ -57,6 +57,11 @@ unsigned long int checksum_dl_stats[RMNET_MAP_CHECKSUM_ENUM_LENGTH];
module_param_array(checksum_dl_stats, ulong, 0, S_IRUGO);
MODULE_PARM_DESC(checksum_dl_stats, "Downlink Checksum Statistics");
static DEFINE_SPINLOCK(rmnet_checksum_ul_stats);
unsigned long int checksum_ul_stats[RMNET_MAP_CHECKSUM_ENUM_LENGTH];
module_param_array(checksum_ul_stats, ulong, 0, S_IRUGO);
MODULE_PARM_DESC(checksum_ul_stats, "Uplink Checksum Statistics");
void rmnet_kfree_skb(struct sk_buff *skb, unsigned int reason)
{
unsigned long flags;
@ -117,3 +122,15 @@ void rmnet_stats_dl_checksum(unsigned int rc)
checksum_dl_stats[rc]++;
spin_unlock_irqrestore(&rmnet_checksum_dl_stats, flags);
}
/*
 * rmnet_stats_ul_checksum() - Account one uplink checksum-offload result.
 * @rc: result code from the UL checksum offload path
 *      (rmnet_map_checksum_errors_e value).
 *
 * Folds out-of-range codes into the "unknown error" bucket, then bumps the
 * matching per-result counter under the UL stats spinlock (IRQ-safe, since
 * the egress path may run in any context).
 */
void rmnet_stats_ul_checksum(unsigned int rc)
{
	unsigned long irq_flags;
	unsigned int bucket = rc;

	if (bucket >= RMNET_MAP_CHECKSUM_ENUM_LENGTH)
		bucket = RMNET_MAP_CHECKSUM_ERR_UNKOWN;

	spin_lock_irqsave(&rmnet_checksum_ul_stats, irq_flags);
	checksum_ul_stats[bucket]++;
	spin_unlock_irqrestore(&rmnet_checksum_ul_stats, irq_flags);
}

View File

@ -56,4 +56,5 @@ void rmnet_stats_queue_xmit(int rc, unsigned int reason);
void rmnet_stats_deagg_pkts(int aggcount);
void rmnet_stats_agg_pkts(int aggcount);
void rmnet_stats_dl_checksum(unsigned int rc);
void rmnet_stats_ul_checksum(unsigned int rc);
#endif /* _RMNET_DATA_STATS_H_ */

View File

@ -220,6 +220,26 @@ TRACE_EVENT(rmnet_map_checksum_downlink_packet,
__get_str(name), __entry->res)
)
/*
 * Tracepoint fired once per uplink packet after the MAP checksum-offload
 * attempt; records the originating device's name and the result code
 * returned by rmnet_map_checksum_uplink_packet().
 */
TRACE_EVENT(rmnet_map_checksum_uplink_packet,

	TP_PROTO(struct net_device *dev, int ckresult),

	TP_ARGS(dev, ckresult),

	TP_STRUCT__entry(
		__string(name, dev->name)
		__field(int, res)
	),

	TP_fast_assign(
		__assign_str(name, dev->name);
		__entry->res = ckresult;
	),

	TP_printk("UL checksum on dev=%s, res: %d",
		__get_str(name), __entry->res)
)
#endif /* _RMNET_DATA_TRACE_H_ */
/* This part must be outside protection */

View File

@ -591,6 +591,11 @@ int rmnet_vnd_create_dev(int id, struct net_device **new_device,
return RMNET_CONFIG_NOMEM;
}
if (!prefix) {
/* Configuring UL checksum offload on rmnet_data interfaces */
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
}
rc = register_netdevice(dev);
if (rc != 0) {
LOGE("Failed to to register netdev [%s]", dev->name);

View File

@ -64,6 +64,21 @@ struct rmnet_map_dl_checksum_trailer_s {
unsigned short checksum_value;
} __aligned(1);
/*
 * MAPv3 uplink checksum meta-info header. Inserted immediately in front of
 * the IP header when UL checksum offload is requested for a packet. The
 * bitfield ordering is chosen per CPU endianness so the packed 16-bit word
 * has the same wire layout on both; the filler routines byte-swap that word
 * to network order after populating it.
 */
struct rmnet_map_ul_checksum_header_s {
	/* Offset (bytes, network order) from the IP header to the start of
	 * the transport header, i.e. where checksumming begins. */
	unsigned short checksum_start_offset;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	/* Offset from the IP header to the transport checksum field. */
	unsigned short checksum_insert_offset:14;
	/* Set for UDP over IPv4; presumably tells HW to apply UDP/IPv4
	 * checksum rules — NOTE(review): exact HW semantics not visible
	 * here, confirm against MAP spec. */
	unsigned short udp_ip4_ind:1;
	/* 1 = hardware should compute/insert the checksum for this packet. */
	unsigned short cks_en:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
	unsigned short cks_en:1;
	unsigned short udp_ip4_ind:1;
	unsigned short checksum_insert_offset:14;
#else
#error "Please fix <asm/byteorder.h>"
#endif
} __aligned(1);
enum rmnet_map_results_e {
RMNET_MAP_SUCCESS,
RMNET_MAP_CONSUMED,
@ -93,6 +108,7 @@ enum rmnet_map_checksum_errors_e {
RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT,
RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET,
RMNET_MAP_CHECKSUM_SKIPPED,
RMNET_MAP_CHECKSUM_SW,
/* This should always be the last element */
RMNET_MAP_CHECKSUM_ENUM_LENGTH
};
@ -128,6 +144,7 @@ void rmnet_map_aggregate(struct sk_buff *skb,
struct rmnet_phys_ep_conf_s *config);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb);
int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev);
#endif /* _RMNET_MAP_H_ */

View File

@ -571,3 +571,100 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb)
return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
}
/*
 * rmnet_map_fill_ipv4_packet_ul_checksum_header() - Populate the UL
 * checksum meta-info header for an IPv4 packet.
 * @iphdr: pointer to the packet's IPv4 header (located directly after
 *         @ul_header in the skb).
 * @ul_header: meta-info header to fill in.
 * @skb: the packet; expected to have a valid transport header offset and
 *       csum_offset (CHECKSUM_PARTIAL — checked by the caller).
 *
 * Records the transport-header start and checksum-field offsets relative to
 * the IP header, enables HW checksum, and flags UDP-over-IPv4. Finally
 * marks the skb CHECKSUM_NONE since the checksum is now offloaded.
 */
static void rmnet_map_fill_ipv4_packet_ul_checksum_header(void *iphdr,
	struct rmnet_map_ul_checksum_header_s *ul_header, struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	/* Written in host order here; the whole second 16-bit word of the
	 * header (insert offset + flag bits) is byte-swapped below. */
	ul_header->checksum_insert_offset = skb->csum_offset + (unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr);
	ul_header->cks_en = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset (and flag bits) to network order:
	 * advance to the second 16-bit word and swap it in place. */
	hdr++;
	*hdr = htons(*hdr);

	skb->ip_summed = CHECKSUM_NONE;
}
/*
 * rmnet_map_fill_ipv6_packet_ul_checksum_header() - Populate the UL
 * checksum meta-info header for an IPv6 packet.
 * @iphdr: pointer to the packet's IPv6 header (located directly after
 *         @ul_header in the skb).
 * @ul_header: meta-info header to fill in.
 * @skb: the packet; expected to have a valid transport header offset and
 *       csum_offset (CHECKSUM_PARTIAL — checked by the caller).
 *
 * Same as the IPv4 variant except udp_ip4_ind is always 0 (the flag is
 * IPv4-specific). Marks the skb CHECKSUM_NONE once offload is set up.
 */
static void rmnet_map_fill_ipv6_packet_ul_checksum_header(void *iphdr,
	struct rmnet_map_ul_checksum_header_s *ul_header, struct sk_buff *skb)
{
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	/* Written in host order here; the whole second 16-bit word of the
	 * header (insert offset + flag bits) is byte-swapped below. */
	ul_header->checksum_insert_offset = skb->csum_offset + (unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr);
	ul_header->cks_en = 1;
	ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset (and flag bits) to network order:
	 * advance to the second 16-bit word and swap it in place. */
	hdr++;
	*hdr = htons(*hdr);

	skb->ip_summed = CHECKSUM_NONE;
}
/**
 * rmnet_map_checksum_uplink_packet() - Generates UL checksum
 * meta info header
 * @skb: Pointer to the packet's skb.
 * @orig_dev: Originating (virtual) net device; its features decide whether
 *            HW checksum offload may be used for this packet.
 *
 * Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload. The header is always
 * pushed in front of the IP header; for unsupported packets it is zeroed
 * (cks_en = 0) so the checksum stays in software.
 *
 * Return:
 *   - RMNET_MAP_CHECKSUM_OK: Meta-info header generated; hardware will
 *     compute and insert the transport checksum.
 *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 *   - RMNET_MAP_CHECKSUM_SW: Unsupported packet for UL checksum offload;
 *     checksum handled by software.
 */
int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
	struct net_device *orig_dev)
{
	unsigned char ip_version;
	struct rmnet_map_ul_checksum_header_s *ul_header;
	void *iphdr;
	int ret;

	/* Reserve the meta-info header ahead of the IP header. The egress
	 * handler has already grown headroom to accommodate it. */
	ul_header = (struct rmnet_map_ul_checksum_header_s *)
		skb_push(skb, sizeof(struct rmnet_map_ul_checksum_header_s));

	/* Offload only if the device actually advertises HW csum features. */
	if (unlikely(!(orig_dev->features &
		(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)))) {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

	/* CHECKSUM_PARTIAL: the stack left the transport checksum for us. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_checksum_header_s);
		/* IP version is the high nibble of the first header byte. */
		ip_version = (*(char *)iphdr & 0xF0) >> 4;
		if (ip_version == 0x04) {
			rmnet_map_fill_ipv4_packet_ul_checksum_header(iphdr,
				ul_header, skb);
			return RMNET_MAP_CHECKSUM_OK;
		} else if (ip_version == 0x06) {
			rmnet_map_fill_ipv6_packet_ul_checksum_header(iphdr,
				ul_header, skb);
			return RMNET_MAP_CHECKSUM_OK;
		} else {
			ret = RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
			goto sw_checksum;
		}
	} else {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

sw_checksum:
	/* Zeroed header (cks_en = 0) tells the modem not to touch the
	 * checksum; software remains responsible for it. */
	ul_header->checksum_start_offset = 0;
	ul_header->checksum_insert_offset = 0;
	ul_header->cks_en = 0;
	ul_header->udp_ip4_ind = 0;

	return ret;
}