net: rmnet_data: In-band flow control

Implement MAP based in-band flow control. Added 2 new configuration
messages to allow adding and deleting flow handles. Added handlers
in VND for flow control events. Added flow control command handler
in rmnet_map_commands.

CRs-fixed: 568534
Change-Id: Ica52e4ad89430c9fa5e2b38e389ee6bc91de2e9b
Signed-off-by: Harout Hedeshian <harouth@codeaurora.org>
This commit is contained in:
Harout Hedeshian 2013-10-24 09:28:22 -06:00 committed by Gerrit - the friendly Code Review server
parent 0dcf4ed8da
commit c9132a2781
10 changed files with 553 additions and 71 deletions

View File

@ -66,6 +66,11 @@ struct rmnet_nl_msg_s {
uint32_t id;
uint8_t vnd_name[RMNET_MAX_STR_LEN];
} vnd;
struct {
uint32_t id;
uint32_t map_flow_id;
uint32_t tc_flow_id;
} flow_control;
};
};
@ -186,7 +191,24 @@ enum rmnet_netlink_message_types_e {
* Args: int32_t node number
* Returns: status code
*/
RMNET_NETLINK_FREE_VND
RMNET_NETLINK_FREE_VND,
/*
* RMNET_NETLINK_ADD_VND_TC_FLOW - Add flow control handle on VND
* Args: int32_t node number
* uint32_t MAP Flow Handle
* uint32_t TC Flow Handle
* Returns: status code
*/
RMNET_NETLINK_ADD_VND_TC_FLOW,
/*
* RMNET_NETLINK_DEL_VND_TC_FLOW - Removes flow control handle on VND
* Args: int32_t node number
* uint32_t MAP Flow Handle
* Returns: status code
*/
RMNET_NETLINK_DEL_VND_TC_FLOW
};
enum rmnet_config_endpoint_modes_e {
@ -205,7 +227,8 @@ enum rmnet_config_return_codes_e {
RMNET_CONFIG_INVALID_REQUEST,
RMNET_CONFIG_NO_SUCH_DEVICE,
RMNET_CONFIG_BAD_ARGUMENTS,
RMNET_CONFIG_BAD_EGRESS_DEVICE
RMNET_CONFIG_BAD_EGRESS_DEVICE,
RMNET_CONFIG_TC_HANDLE_FULL
};
#endif /* _RMNET_DATA_H_ */

View File

@ -26,6 +26,8 @@
#include "rmnet_data_vnd.h"
#include "rmnet_data_private.h"
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);
/* ***************** Local Definitions and Declarations ********************* */
static struct sock *nl_socket_handle;
@ -316,6 +318,41 @@ static void _rmnet_netlink_get_vnd_name
resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(vnd);
}
/* _rmnet_netlink_add_del_vnd_tc_flow() - Handle TC flow add/remove requests
 *
 * Dispatches RMNET_NETLINK_ADD_VND_TC_FLOW / RMNET_NETLINK_DEL_VND_TC_FLOW
 * netlink commands to the matching VND flow-mapping operation and stores the
 * resulting status code in the response message.
 */
static void _rmnet_netlink_add_del_vnd_tc_flow
					(uint32_t command,
					 struct rmnet_nl_msg_s *rmnet_header,
					 struct rmnet_nl_msg_s *resp_rmnet)
{
	uint32_t dev_id, map_handle, tc_handle;

	_RMNET_NETLINK_NULL_CHECKS();
	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;

	dev_id = rmnet_header->flow_control.id;
	map_handle = rmnet_header->flow_control.map_flow_id;
	tc_handle = rmnet_header->flow_control.tc_flow_id;

	if (command == RMNET_NETLINK_ADD_VND_TC_FLOW) {
		resp_rmnet->return_code =
			rmnet_vnd_add_tc_flow(dev_id, map_handle, tc_handle);
	} else if (command == RMNET_NETLINK_DEL_VND_TC_FLOW) {
		resp_rmnet->return_code =
			rmnet_vnd_del_tc_flow(dev_id, map_handle, tc_handle);
	} else {
		LOGM("%s(): called with unhandled command %d\n",
		     __func__, command);
		resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
	}
}
/**
* rmnet_config_netlink_msg_handler() - Netlink message handler callback
* @skb: Packet containing netlink messages
@ -418,6 +455,13 @@ void rmnet_config_netlink_msg_handler(struct sk_buff *skb)
_rmnet_netlink_get_vnd_name(rmnet_header, resp_rmnet);
break;
case RMNET_NETLINK_DEL_VND_TC_FLOW:
case RMNET_NETLINK_ADD_VND_TC_FLOW:
_rmnet_netlink_add_del_vnd_tc_flow(rmnet_header->message_type,
rmnet_header,
resp_rmnet);
break;
default:
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
resp_rmnet->return_code = RMNET_CONFIG_UNKNOWN_MESSAGE;
@ -425,6 +469,7 @@ void rmnet_config_netlink_msg_handler(struct sk_buff *skb)
}
rtnl_unlock();
nlmsg_unicast(nl_socket_handle, skb_response, return_pid);
LOGD("%s(): Done processing command\n", __func__);
}

View File

@ -23,6 +23,9 @@
#include "rmnet_data_vnd.h"
#include "rmnet_map.h"
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_HANDLER);
void rmnet_egress_handler(struct sk_buff *skb,
struct rmnet_logical_ep_conf_s *ep);

View File

@ -27,6 +27,10 @@ unsigned int rmnet_data_log_level = RMNET_LOG_LVL_ERR | RMNET_LOG_LVL_HI;
module_param(rmnet_data_log_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(log_level, "Logging level");
unsigned int rmnet_data_log_module_mask;
module_param(rmnet_data_log_module_mask, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rmnet_data_log_module_mask, "Logging module mask");
/* ***************** Startup/Shutdown *************************************** */
/**

View File

@ -22,6 +22,7 @@
#define RMNET_ETHERNET_HEADER_LENGTH 14
extern unsigned int rmnet_data_log_level;
extern unsigned int rmnet_data_log_module_mask;
#define RMNET_INIT_OK 0
#define RMNET_INIT_ERROR 1
@ -32,6 +33,15 @@ extern unsigned int rmnet_data_log_level;
#define RMNET_LOG_LVL_HI (1<<1)
#define RMNET_LOG_LVL_ERR (1<<0)
#define RMNET_LOG_MODULE(X) \
static uint32_t rmnet_mod_mask = X
#define RMNET_DATA_LOGMASK_CONFIG (1<<0)
#define RMNET_DATA_LOGMASK_HANDLER (1<<1)
#define RMNET_DATA_LOGMASK_VND (1<<2)
#define RMNET_DATA_LOGMASK_MAPD (1<<3)
#define RMNET_DATA_LOGMASK_MAPC (1<<4)
#define LOGE(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_ERR) \
pr_err(fmt, ##__VA_ARGS__); \
} while (0)
@ -49,9 +59,13 @@ extern unsigned int rmnet_data_log_level;
pr_notice(fmt, ##__VA_ARGS__); \
} while (0)
#define LOGD(fmt, ...) do { if (unlikely \
(rmnet_data_log_level & RMNET_LOG_LVL_DBG)) \
pr_debug(fmt, ##__VA_ARGS__); \
/* Don't use pr_debug as it is compiled out of the kernel. We can be sure of
* minimal impact as LOGD is not enabled by default.
*/
#define LOGD(fmt, ...) do { if (unlikely( \
(rmnet_data_log_level & RMNET_LOG_LVL_DBG) \
&& (rmnet_data_log_module_mask & rmnet_mod_mask))) \
pr_notice(fmt, ##__VA_ARGS__); \
} while (0)
#endif /* _RMNET_DATA_PRIVATE_H_ */

View File

@ -22,20 +22,48 @@
#include <linux/if_arp.h>
#include <linux/spinlock.h>
#include <net/pkt_sched.h>
#include <linux/atomic.h>
#include "rmnet_data_config.h"
#include "rmnet_data_handlers.h"
#include "rmnet_data_private.h"
#include "rmnet_map.h"
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_VND);
/* Maximum number of TC flow handles one MAP flow handle can fan out to */
#define RMNET_MAP_FLOW_NUM_TC_HANDLE 3
/* Actions accepted by _rmnet_vnd_update_flow_map() */
#define RMNET_VND_UF_ACTION_ADD 0
#define RMNET_VND_UF_ACTION_DEL 1
/* Return codes of _rmnet_vnd_update_flow_map() */
enum {
RMNET_VND_UPDATE_FLOW_OK,
RMNET_VND_UPDATE_FLOW_NO_ACTION,
RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM,
RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT
};
/* Table of all virtual network devices, indexed by VND id */
struct net_device *rmnet_devices[RMNET_DATA_MAX_VND];
/* One MAP flow handle mapped to up to RMNET_MAP_FLOW_NUM_TC_HANDLE TC
 * handles; linked into rmnet_vnd_private_s.flow_head.
 */
struct rmnet_map_flow_mapping_s {
struct list_head list;
uint32_t map_flow_id;
/* tc_flow_id[i] is meaningful only while tc_flow_valid[i] == 1 */
uint32_t tc_flow_valid[RMNET_MAP_FLOW_NUM_TC_HANDLE];
uint32_t tc_flow_id[RMNET_MAP_FLOW_NUM_TC_HANDLE];
/* Last accepted in-band flow control sequence numbers */
atomic_t v4_seq;
atomic_t v6_seq;
};
/* Per-VND private state (netdev_priv) */
struct rmnet_vnd_private_s {
uint8_t qos_mode:1;
uint8_t reserved:7;
struct rmnet_logical_ep_conf_s local_ep;
/* NOTE(review): rmnet_map_flow_control_s is deleted from rmnet_map.h by
 * this same change - this member looks like a removed diff line; verify
 * against the post-change source.
 */
struct rmnet_map_flow_control_s flows;
/* Protects flow_head and the mapping objects it links */
rwlock_t flow_map_lock;
struct list_head flow_head;
};
/* Return codes of _rmnet_vnd_do_flow_control() */
#define RMNET_VND_FC_QUEUED 0
#define RMNET_VND_FC_NOT_ENABLED 1
#define RMNET_VND_FC_KMALLOC_ERR 2
/* ***************** Helper Functions *************************************** */
/**
@ -210,6 +238,49 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
return rc;
}
/* Deferred flow control request, run from the shared workqueue so the qdisc
 * manipulation happens in process context under rtnl_lock.
 */
struct rmnet_vnd_fc_work {
struct work_struct work; /* must stay first: code casts work_struct* to this */
struct net_device *dev;
uint32_t tc_handle;
int enable;
};
/* Workqueue handler: applies one queued flow control action.
 * Takes rtnl_lock around tc_qdisc_flow_control(), logs the action, then
 * frees the work item allocated by _rmnet_vnd_do_flow_control().
 */
static void _rmnet_vnd_wq_flow_control(struct work_struct *work)
{
struct rmnet_vnd_fc_work *fcwork;
/* Valid only because work is the first member of rmnet_vnd_fc_work */
fcwork = (struct rmnet_vnd_fc_work *)work;
rtnl_lock();
tc_qdisc_flow_control(fcwork->dev, fcwork->tc_handle, fcwork->enable);
rtnl_unlock();
LOGL("%s(): [%s] handle:%08X enable:%d\n",
__func__, fcwork->dev->name, fcwork->tc_handle, fcwork->enable);
kfree(work);
}
/**
 * _rmnet_vnd_do_flow_control() - Queue a flow control action
 * @dev:       Virtual network device the TC handle belongs to
 * @tc_handle: TC qdisc flow handle to enable/disable
 * @enable:    Boolean to enable/disable the flow
 *
 * Allocates and schedules a work item; the qdisc manipulation itself happens
 * later in _rmnet_vnd_wq_flow_control() under rtnl_lock. GFP_ATOMIC is used
 * since this may be called from the packet ingress path.
 *
 * Return:
 *      - RMNET_VND_FC_QUEUED on success
 *      - RMNET_VND_FC_KMALLOC_ERR if the work item could not be allocated
 */
static int _rmnet_vnd_do_flow_control(struct net_device *dev,
				      uint32_t tc_handle,
				      int enable)
{
	struct rmnet_vnd_fc_work *fcwork;

	/* Don't cast kmalloc's return; size from the object, not the type */
	fcwork = kmalloc(sizeof(*fcwork), GFP_ATOMIC);
	if (!fcwork)
		return RMNET_VND_FC_KMALLOC_ERR;
	memset(fcwork, 0, sizeof(*fcwork));

	INIT_WORK(&fcwork->work, _rmnet_vnd_wq_flow_control);
	fcwork->dev = dev;
	fcwork->tc_handle = tc_handle;
	fcwork->enable = enable;

	schedule_work(&fcwork->work);
	return RMNET_VND_FC_QUEUED;
}
#else
static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
struct ifreq *ifr,
@ -217,6 +288,14 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
{
return -EINVAL;
}
/* Stub used when CONFIG_RMNET_DATA_FC is not set: logs the request and
 * reports flow control as not enabled.
 */
static inline int _rmnet_vnd_do_flow_control(struct net_device *dev,
uint32_t tc_handle,
int enable)
{
LOGD("%s(): [%s] called with no QoS support", __func__, dev->name);
return RMNET_VND_FC_NOT_ENABLED;
}
#endif /* CONFIG_RMNET_DATA_FC */
/**
@ -322,8 +401,9 @@ static void rmnet_vnd_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
/* Flow control locks */
rwlock_init(&dev_conf->flows.flow_map_lock);
/* Flow control */
rwlock_init(&dev_conf->flow_map_lock);
INIT_LIST_HEAD(&dev_conf->flow_head);
}
/* ***************** Exposed API ******************************************** */
@ -515,70 +595,306 @@ struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev)
}
/**
 * _rmnet_vnd_get_flow_map() - Gets object representing a MAP flow handle
 * @dev_conf: Private configuration structure for virtual network device
 * @map_flow: MAP flow handle ID
 *
 * Loops through available flow mappings and compares the MAP flow handle.
 * Returns when mapping is found.
 *
 * Return:
 *      - Null if no mapping was found
 *      - Pointer to mapping otherwise
 */
static struct rmnet_map_flow_mapping_s *_rmnet_vnd_get_flow_map
					(struct rmnet_vnd_private_s *dev_conf,
					 uint32_t map_flow)
{
	struct list_head *p;
	struct rmnet_map_flow_mapping_s *itm;

	/* Caller must hold dev_conf->flow_map_lock (read or write) */
	list_for_each(p, &dev_conf->flow_head) {
		/* list_entry() is container_of(): it cannot produce NULL for
		 * a valid list node, so no NULL/BUG check is needed here.
		 */
		itm = list_entry(p, struct rmnet_map_flow_mapping_s, list);
		if (itm->map_flow_id == map_flow)
			return itm;
	}

	return NULL;
}
/**
 * _rmnet_vnd_update_flow_map() - Add or remove individual TC flow handles
 * @action: One of RMNET_VND_UF_ACTION_ADD / RMNET_VND_UF_ACTION_DEL
 * @itm: Flow mapping object
 * @tc_flow: TC flow handle
 *
 * RMNET_VND_UF_ACTION_ADD:
 * Stores tc_flow in the first invalid slot of the mapping object and marks
 * that slot valid.
 *
 * RMNET_VND_UF_ACTION_DEL:
 * Invalidates and zeroes every valid slot holding tc_flow, and reports
 * whether the mapping object is left with no valid slots at all.
 *
 * Return:
 *      - RMNET_VND_UPDATE_FLOW_OK tc flow handle is added/removed ok
 *      - RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM if there are no more tc handles
 *      - RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT if flow mapping is now empty
 *      - RMNET_VND_UPDATE_FLOW_NO_ACTION if no action was taken
 */
static int _rmnet_vnd_update_flow_map(uint8_t action,
				      struct rmnet_map_flow_mapping_s *itm,
				      uint32_t tc_flow)
{
	int i;
	int empty_slots;

	if (action == RMNET_VND_UF_ACTION_ADD) {
		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
			if (itm->tc_flow_valid[i])
				continue;
			itm->tc_flow_valid[i] = 1;
			itm->tc_flow_id[i] = tc_flow;
			LOGD("%s(): {%p}->tc_flow_id[%d] = %08X\n",
			     __func__, itm, i, tc_flow);
			return RMNET_VND_UPDATE_FLOW_OK;
		}
		return RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM;
	}

	if (action == RMNET_VND_UF_ACTION_DEL) {
		empty_slots = 0;
		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
			if (!itm->tc_flow_valid[i]) {
				empty_slots++;
				continue;
			}
			if (itm->tc_flow_id[i] == tc_flow) {
				itm->tc_flow_valid[i] = 0;
				itm->tc_flow_id[i] = 0;
				empty_slots++;
				LOGD("%s(): {%p}->tc_flow_id[%d] = 0\n",
				     __func__, itm, i);
			}
		}
		if (empty_slots == RMNET_MAP_FLOW_NUM_TC_HANDLE)
			return RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT;
		return RMNET_VND_UPDATE_FLOW_OK;
	}

	return RMNET_VND_UPDATE_FLOW_NO_ACTION;
}
/**
 * rmnet_vnd_add_tc_flow() - Add a MAP/TC flow handle mapping
 * @id: Virtual network device ID
 * @map_flow: MAP flow handle
 * @tc_flow: TC flow handle
 *
 * Checks for an existing flow mapping object corresponding to map_flow. If one
 * is found, then it will try to add to the existing mapping object. Otherwise,
 * a new mapping object is created.
 *
 * Return:
 *      - RMNET_CONFIG_OK if successful
 *      - RMNET_CONFIG_TC_HANDLE_FULL if there is no more room in the map object
 *      - RMNET_CONFIG_NOMEM failed to allocate a new map object
 */
int rmnet_vnd_add_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow)
{
	struct rmnet_map_flow_mapping_s *itm;
	struct rmnet_map_flow_mapping_s *new_itm;
	struct net_device *dev;
	struct rmnet_vnd_private_s *dev_conf;
	int r;
	unsigned long flags;

	/* id is unsigned, so only the upper bound needs checking */
	if ((id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
		LOGM("%s(): Invalid id [%u]\n", __func__, id);
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}

	dev = rmnet_devices[id];
	dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
	if (!dev_conf)
		BUG();

	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
	if (itm) {
		r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_ADD,
					       itm, tc_flow);
		write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
		if (r != RMNET_VND_UPDATE_FLOW_OK)
			return RMNET_CONFIG_TC_HANDLE_FULL;
		return RMNET_CONFIG_OK;
	}
	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);

	/* No mapping yet; allocate one outside the lock (GFP_KERNEL sleeps) */
	new_itm = kmalloc(sizeof(*new_itm), GFP_KERNEL);
	if (!new_itm) {
		LOGM("%s(): failure allocating flow mapping\n", __func__);
		return RMNET_CONFIG_NOMEM;
	}
	memset(new_itm, 0, sizeof(*new_itm));
	new_itm->map_flow_id = map_flow;
	new_itm->tc_flow_valid[0] = 1;
	new_itm->tc_flow_id[0] = tc_flow;
	atomic_set(&new_itm->v4_seq, 0);
	atomic_set(&new_itm->v6_seq, 0);

	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
	/* Re-check under the lock: another thread may have added this
	 * map_flow while the lock was dropped for the allocation; inserting
	 * blindly would create duplicate entries.
	 */
	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
	if (itm) {
		r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_ADD,
					       itm, tc_flow);
		write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
		kfree(new_itm);
		if (r != RMNET_VND_UPDATE_FLOW_OK)
			return RMNET_CONFIG_TC_HANDLE_FULL;
		return RMNET_CONFIG_OK;
	}
	list_add(&new_itm->list, &dev_conf->flow_head);
	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);

	LOGD("%s(): Created flow mapping [%s][0x%08X][0x%08X]@%p\n", __func__,
	     dev->name, new_itm->map_flow_id, new_itm->tc_flow_id[0], new_itm);

	return RMNET_CONFIG_OK;
}
/**
 * rmnet_vnd_del_tc_flow() - Delete a MAP/TC flow handle mapping
 * @id: Virtual network device ID
 * @map_flow: MAP flow handle
 * @tc_flow: TC flow handle
 *
 * Checks for an existing flow mapping object corresponding to map_flow. If one
 * is found, then it will try to remove the existing tc_flow mapping. If the
 * mapping object no longer contains any mappings, then it is freed. Otherwise
 * the mapping object is left in the list
 *
 * Return:
 *      - RMNET_CONFIG_OK if successful or if there was no such tc_flow
 *      - RMNET_CONFIG_INVALID_REQUEST if there is no such map_flow
 */
int rmnet_vnd_del_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow)
{
	struct rmnet_vnd_private_s *dev_conf;
	struct net_device *dev;
	struct rmnet_map_flow_mapping_s *itm;
	int r;
	unsigned long flags;
	int rc = RMNET_CONFIG_OK;

	/* id is unsigned, so only the upper bound needs checking */
	if ((id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
		LOGM("%s(): Invalid id [%u]\n", __func__, id);
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}
	dev = rmnet_devices[id];
	dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
	if (!dev_conf)
		BUG();

	r = RMNET_VND_UPDATE_FLOW_NO_ACTION;
	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
	if (!itm) {
		rc = RMNET_CONFIG_INVALID_REQUEST;
	} else {
		r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_DEL,
					       itm, tc_flow);
		/* Unlink while still holding the lock; free after unlock */
		if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT)
			list_del(&itm->list);
	}
	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);

	if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT) {
		/* r can only be NO_VALID_LEFT when itm was found above */
		LOGD("%s(): Removed flow mapping [%s][0x%08X]@%p\n",
		     __func__, dev->name, itm->map_flow_id, itm);
		kfree(itm);
	}

	return rc;
}
/* NOTE(review): this span is a unified-diff rendering with removed and added
 * lines interleaved - both the old rmnet_vnd_get_flow_mapping() and the new
 * rmnet_vnd_do_flow_control() signatures and body fragments appear back to
 * back. It is not compilable as shown; recover the post-change function from
 * the actual source file before editing it.
 */
/**
* rmnet_vnd_do_flow_control() - Process flow control request
* @dev: Virtual network device node to do lookup on
* @map_flow_id: Flow ID from MAP message
* @v4_seq: pointer to IPv4 indication sequence number
* @v6_seq: pointer to IPv6 indication sequence number
* @enable: boolean to enable/disable flow.
*
* Return:
* - 0 if successful
* - 1 if no mapping is found
* - 2 if dev is not RmNet virtual network device node
*/
/* old prototype (removed by this change): */
int rmnet_vnd_get_flow_mapping(struct net_device *dev,
/* new prototype: */
int rmnet_vnd_do_flow_control(struct net_device *dev,
uint32_t map_flow_id,
uint32_t *tc_handle,
uint64_t **v4_seq,
uint64_t **v6_seq)
uint16_t v4_seq,
uint16_t v6_seq,
int enable)
{
struct rmnet_vnd_private_s *dev_conf;
struct rmnet_map_flow_mapping_s *flowmap;
int i;
int error = 0;
struct rmnet_map_flow_mapping_s *itm;
int do_fc, error, i;
error = 0;
do_fc = 0;
if (!dev || !tc_handle)
if (unlikely(!dev))
BUG();
if (!rmnet_vnd_is_vnd(dev)) {
*tc_handle = 0;
return 2;
} else {
dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
}
if (!dev_conf)
if (unlikely(!dev_conf))
BUG();
/* old body (removed): referenced dev_conf->flows, whose type
 * rmnet_map_flow_control_s is deleted from rmnet_map.h by this change.
 */
if (map_flow_id == 0xFFFFFFFF) {
*tc_handle = dev_conf->flows.default_tc_handle;
*v4_seq = &dev_conf->flows.default_v4_seq;
*v6_seq = &dev_conf->flows.default_v6_seq;
if (*tc_handle == 0)
error = 1;
} else {
flowmap = &dev_conf->flows.flowmap[0];
for (i = 0; i < RMNET_MAP_MAX_FLOWS; i++) {
if ((flowmap[i].flow_id != 0)
&& (flowmap[i].flow_id == map_flow_id)) {
read_lock(&dev_conf->flow_map_lock);
itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow_id);
*tc_handle = flowmap[i].tc_handle;
*v4_seq = &flowmap[i].v4_seq;
*v6_seq = &flowmap[i].v6_seq;
error = 0;
break;
if (!itm) {
LOGL("%s(): Got flow control request for unknown flow %08X\n",
__func__, map_flow_id);
goto fcdone;
}
/* new body: apply flow control to every valid TC handle of the mapping
 * when the sequence number is current (0 forces acceptance).
 */
if (v4_seq == 0 || v4_seq >= atomic_read(&(itm->v4_seq))) {
atomic_set(&(itm->v4_seq), v4_seq);
for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
if (itm->tc_flow_valid[i] == 1) {
LOGD("%s(): Found [%s][0x%08X][%d:0x%08X]\n",
__func__, dev->name, itm->map_flow_id, i,
itm->tc_flow_id[i]);
_rmnet_vnd_do_flow_control(dev,
itm->tc_flow_id[i],
enable);
}
}
*v4_seq = 0;
*v6_seq = 0;
*tc_handle = 0;
error = 1;
} else {
LOGD("%s(): Internal seq(%hd) higher than called(%hd)\n",
__func__, atomic_read(&(itm->v4_seq)), v4_seq);
}
fcdone:
read_unlock(&dev_conf->flow_map_lock);
return error;
}

View File

@ -19,9 +19,11 @@
#ifndef _RMNET_DATA_VND_H_
#define _RMNET_DATA_VND_H_
int rmnet_vnd_get_flow_mapping(struct net_device *dev,
unsigned int map_flow_id,
unsigned int *flow_map);
int rmnet_vnd_do_flow_control(struct net_device *dev,
uint32_t map_flow_id,
uint16_t v4_seq,
uint16_t v6_seq,
int enable);
struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev);
int rmnet_vnd_get_name(int id, char *name, int name_len);
int rmnet_vnd_create_dev(int id, struct net_device **new_device,
@ -29,6 +31,8 @@ int rmnet_vnd_create_dev(int id, struct net_device **new_device,
int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
int rmnet_vnd_is_vnd(struct net_device *dev);
int rmnet_vnd_add_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow);
int rmnet_vnd_del_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow);
int rmnet_vnd_init(void);
void rmnet_vnd_exit(void);

View File

@ -17,8 +17,6 @@
#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
#define RMNET_MAP_MAX_FLOWS 8
struct rmnet_map_header_s {
#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
uint8_t pad_len:6;
@ -60,21 +58,6 @@ struct rmnet_map_control_command_s {
};
} __aligned(1);
struct rmnet_map_flow_mapping_s {
uint32_t flow_id;
uint32_t tc_handle;
uint64_t v4_seq;
uint64_t v6_seq;
};
struct rmnet_map_flow_control_s {
rwlock_t flow_map_lock;
uint32_t default_tc_handle;
uint64_t default_v4_seq;
uint64_t default_v6_seq;
struct rmnet_map_flow_mapping_s flowmap[RMNET_MAP_MAX_FLOWS];
};
enum rmnet_map_results_e {
RMNET_MAP_SUCCESS,
RMNET_MAP_CONSUMED,
@ -138,7 +121,7 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header_s *)Y->data)->cd_bit)
#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header_s *)Y->data)->pad_len)
#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command_s *) \
Y->data + sizeof(struct rmnet_map_header_s))
(Y->data + sizeof(struct rmnet_map_header_s)))
#define RMNET_MAP_GET_LENGTH(Y) (ntohs( \
((struct rmnet_map_header_s *)Y->data)->pkt_len))

View File

@ -20,18 +20,95 @@
#include "rmnet_data_config.h"
#include "rmnet_map.h"
#include "rmnet_data_private.h"
#include "rmnet_data_vnd.h"
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPC);
/* Per-command counters, sized by the MAP command enum and exported read-only
 * as a module parameter array for debugging/statistics.
 */
unsigned long int rmnet_map_command_stats[RMNET_MAP_COMMAND_ENUM_LENGTH];
module_param_array(rmnet_map_command_stats, ulong, 0, S_IRUGO);
MODULE_PARM_DESC(rmnet_map_command_stats, "MAP command statistics");
/**
 * rmnet_map_do_flow_control() - Process MAP flow control command
 * @skb: Socket buffer containing the MAP flow control message
 * @config: Physical end-point configuration of ingress device
 * @enable: boolean for enable/disable
 *
 * Process in-band MAP flow control messages. Assumes mux ID is mapped to a
 * RmNet Data virtual network device.
 *
 * Return:
 *      - RMNET_MAP_COMMAND_UNSUPPORTED on any error
 *      - RMNET_MAP_COMMAND_ACK on success
 */
static uint8_t rmnet_map_do_flow_control(struct sk_buff *skb,
					 struct rmnet_phys_ep_conf_s *config,
					 int enable)
{
	/* Stray "return RMNET_MAP_COMMAND_UNSUPPORTED;" (the old stub body,
	 * left over from the diff rendering) removed here: it made the entire
	 * function body below unreachable.
	 */
	struct rmnet_map_control_command_s *cmd;
	struct net_device *vnd;
	struct rmnet_logical_ep_conf_s *ep;
	uint8_t mux_id;
	uint16_t ip_family;
	uint16_t fc_seq;
	uint32_t qos_id;
	int r;

	if (unlikely(!skb || !config))
		BUG();

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	cmd = RMNET_MAP_GET_CMD_START(skb);

	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
		LOGD("%s(): Got packet on %s with bad mux id %d\n",
		     __func__, skb->dev->name, mux_id);
		kfree_skb(skb);
		/* NOTE(review): returning RX_HANDLER_CONSUMED from a
		 * uint8_t-returning command handler after freeing skb relies
		 * on the caller not using skb (or this value as an ack code)
		 * afterwards - verify the caller's handling.
		 */
		return RX_HANDLER_CONSUMED;
	}

	ep = &(config->muxed_ep[mux_id]);

	if (!ep->refcount) {
		LOGD("%s(): Packet on %s:%d; has no logical endpoint config\n",
		     __func__, skb->dev->name, mux_id);
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the ip family and pass the sequence number for both v4 and v6
	 * sequence. User space does not support creating dedicated flows for
	 * the 2 protocols
	 */
	r = rmnet_vnd_do_flow_control(vnd, qos_id, fc_seq, fc_seq, enable);
	LOGD("%s(): dev:%s, qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d\n",
	     __func__, skb->dev->name, qos_id, ip_family & 3, fc_seq, enable);

	if (r)
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	else
		return RMNET_MAP_COMMAND_ACK;
}
/**
* rmnet_map_send_ack() - Send N/ACK message for MAP commands
* @skb: Socket buffer containing the MAP command message
* @type: N/ACK message selector
*
* skb is modified to contain the message type selector. The message is then
* transmitted on skb->dev. Note that this function grabs global Tx lock on
* skb->dev for latency reasons.
*
* Return:
* - void
*/
static void rmnet_map_send_ack(struct sk_buff *skb,
unsigned char type)
{
@ -53,6 +130,17 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
spin_unlock_irqrestore(&(skb->dev->tx_global_lock), flags);
}
/**
* rmnet_map_command() - Entry point for handling MAP commands
* @skb: Socket buffer containing the MAP command message
* @config: Physical end-point configuration of ingress device
*
* Process MAP command frame and send N/ACK message as appropriate. Message cmd
* name is decoded here and appropriate handler is called.
*
* Return:
* - RX_HANDLER_CONSUMED. Command frames are always consumed.
*/
rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
struct rmnet_phys_ep_conf_s *config)
{
@ -60,7 +148,7 @@ rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
unsigned char command_name;
unsigned char rc = 0;
if (!skb)
if (unlikely(!skb))
BUG();
cmd = RMNET_MAP_GET_CMD_START(skb);
@ -85,5 +173,5 @@ rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
break;
}
rmnet_map_send_ack(skb, rc);
return 0; /* TODO: handler_consumed */
return RX_HANDLER_CONSUMED;
}

View File

@ -25,6 +25,8 @@
#include "rmnet_map.h"
#include "rmnet_data_private.h"
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPD);
/* ***************** Local Definitions ************************************** */
struct agg_work {
struct delayed_work work;