Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
	drivers/net/ethernet/emulex/benet/be.h
	include/net/tcp.h
	net/mac802154/mac802154.h

Most of the conflicts were minor overlapping changes.

The be2net driver brought in some fixes that added __vlan_put_tag
calls, which in net-next take an additional argument.
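
For reference, the signature change behind those conflicts is roughly the
following (paraphrased from the VLAN helpers in if_vlan.h, not quoted from
this merge):

	/* net (old two-argument form) */
	struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci);

	/* net-next: the VLAN protocol is now passed explicitly */
	struct sk_buff *__vlan_put_tag(struct sk_buff *skb, __be16 vlan_proto,
				       u16 vlan_tci);

The be2net calls were therefore resolved to pass htons(ETH_P_8021Q) as the
new second argument, as the be_main.c hunks below show.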

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 58717686cf
Author: David S. Miller <davem@davemloft.net>
Date:   2013-04-30 03:50:54 -04:00

26 changed files with 293 additions and 104 deletions

View File

@ -578,6 +578,7 @@ data_sock_getname(struct socket *sock, struct sockaddr *addr,
lock_sock(sk);
*addr_len = sizeof(*maddr);
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
maddr->channel = _pms(sk)->ch.nr;
maddr->sapi = _pms(sk)->ch.addr & 0xff;

View File

@ -33,8 +33,8 @@ static unsigned long ram[] = {0, 0, 0, 0};
static bool do_reset = 0;
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(ram, int, NULL, 0);
module_param_array(irq, byte, NULL, 0);
module_param_array(ram, long, NULL, 0);
module_param(do_reset, bool, 0);
static int identify_board(unsigned long, unsigned int);

View File

@ -1917,14 +1917,16 @@ err_detach:
bond_detach_slave(bond, new_slave);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
write_unlock_bh(&bond->lock);
if (bond->curr_active_slave == new_slave) {
bond_change_active_slave(bond, NULL);
write_unlock_bh(&bond->lock);
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
} else {
write_unlock_bh(&bond->lock);
}
slave_disable_netpoll(new_slave);

View File

@ -863,6 +863,7 @@ static int __init cfspi_init_module(void)
driver_remove_file(&cfspi_spi_driver.driver,
&driver_attr_up_head_align);
err_create_up_head_align:
platform_driver_unregister(&cfspi_spi_driver);
err_dev_register:
return result;
}

View File

@ -1044,6 +1044,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
DP(NETIF_MSG_INTR,
"got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
fp->index, fp->fw_sb_id, fp->igu_sb_id);
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
#ifdef BNX2X_STOP_ON_ERROR
@ -1725,7 +1726,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
int bnx2x_setup_irqs(struct bnx2x *bp)
{
int rc = 0;
if (bp->flags & USING_MSIX_FLAG &&
@ -2581,6 +2582,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
bnx2x_pre_irq_nic_init(bp);
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
@ -2590,11 +2593,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error2);
}
/* Setup NIC internals and enable interrupts */
bnx2x_nic_init(bp, load_code);
/* Init per-function objects */
if (IS_PF(bp)) {
/* Setup NIC internals and enable interrupts */
bnx2x_post_irq_nic_init(bp, load_code);
bnx2x_init_bp_objs(bp);
bnx2x_iov_nic_init(bp);
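
The resulting call order in bnx2x_nic_load() after these hunks is roughly the
following; this is a reconstruction from the hunks above, not a verbatim
quote, with error handling trimmed:

	bnx2x_pre_irq_nic_init(bp);	/* fastpath objects and rings */

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error2);

	/* Init per-function objects */
	if (IS_PF(bp)) {
		/* Setup NIC internals and enable interrupts */
		bnx2x_post_irq_nic_init(bp, load_code);	/* status blocks, slowpath rings */
		bnx2x_init_bp_objs(bp);
		bnx2x_iov_nic_init(bp);
	}

In other words, the single bnx2x_nic_init(bp, load_code) call, which used to
run only after the IRQs were requested, is split into a pre-IRQ and a
post-IRQ half.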

View File

@ -295,16 +295,29 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
void bnx2x_nic_init_cnic(struct bnx2x *bp);
/**
* bnx2x_nic_init - init driver internals.
* bnx2x_preirq_nic_init - init driver internals.
*
* @bp: driver handle
*
* Initializes:
* - rings
* - fastpath object
* - fastpath rings
* etc.
*/
void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
/**
* bnx2x_postirq_nic_init - init driver internals.
*
* @bp: driver handle
* @load_code: COMMON, PORT or FUNCTION
*
* Initializes:
* - status blocks
* - slowpath rings
* - etc.
*/
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
/**
* bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
*

View File

@ -6030,10 +6030,11 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp)
mmiowb();
}
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
int i;
/* Setup NIC internals and enable interrupts */
for_each_eth_queue(bp, i)
bnx2x_init_eth_fp(bp, i);
@ -6041,19 +6042,27 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
rmb();
bnx2x_init_rx_rings(bp);
bnx2x_init_tx_rings(bp);
if (IS_VF(bp)) {
bnx2x_memset_stats(bp);
return;
}
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
bp->common.shmem_base, bp->common.shmem2_base,
BP_PORT(bp));
if (IS_PF(bp)) {
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
bp->common.shmem_base,
bp->common.shmem2_base, BP_PORT(bp));
bnx2x_init_def_sb(bp);
bnx2x_update_dsb_idx(bp);
bnx2x_init_sp_ring(bp);
/* initialize the default status block and sp ring */
bnx2x_init_def_sb(bp);
bnx2x_update_dsb_idx(bp);
bnx2x_init_sp_ring(bp);
}
}
void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
{
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
bnx2x_pf_init(bp);
@ -6071,12 +6080,7 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
AEU_INPUTS_ATTN_BITS_SPIO5);
}
/* end of nic init */
/*
* gzip service functions
*/
/* gzip service functions */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,

View File

@ -329,6 +329,7 @@ enum vf_state {
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
struct phy_info {
u8 transceiver;
@ -436,6 +437,7 @@ struct be_adapter {
bool wol;
u32 uc_macs; /* Count of secondary UC MAC programmed */
u16 asic_rev;
u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
u16 max_mcast_mac;
@ -651,6 +653,11 @@ static inline bool be_is_wol_excluded(struct be_adapter *adapter)
}
}
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);

View File

@ -263,6 +263,27 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
}
}
static void be_async_dbg_evt_process(struct be_adapter *adapter,
u32 trailer, struct be_mcc_compl *cmp)
{
u8 event_type = 0;
struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
ASYNC_TRAILER_EVENT_TYPE_MASK;
switch (event_type) {
case ASYNC_DEBUG_EVENT_TYPE_QNQ:
if (evt->valid)
adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
break;
default:
dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
break;
}
}
static inline bool is_link_state_evt(u32 trailer)
{
return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
@ -277,6 +298,13 @@ static inline bool is_grp5_evt(u32 trailer)
ASYNC_EVENT_CODE_GRP_5);
}
static inline bool is_dbg_evt(u32 trailer)
{
return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
ASYNC_TRAILER_EVENT_CODE_MASK) ==
ASYNC_EVENT_CODE_QNQ);
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
@ -325,6 +353,9 @@ int be_process_mcc(struct be_adapter *adapter)
else if (is_grp5_evt(compl->flags))
be_async_grp5_evt_process(adapter,
compl->flags, compl);
else if (is_dbg_evt(compl->flags))
be_async_dbg_evt_process(adapter,
compl->flags, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
status = be_mcc_compl_process(adapter, compl);
atomic_dec(&mcc_obj->q.used);
@ -1020,6 +1051,7 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@ -2457,6 +2489,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
struct mgmt_controller_attrib *attribs;
struct be_dma_mem attribs_cmd;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
@ -2464,12 +2499,10 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
if (!attribs_cmd.va) {
dev_err(&adapter->pdev->dev,
"Memory allocation failure\n");
return -ENOMEM;
status = -ENOMEM;
goto err;
}
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
if (!wrb) {
status = -EBUSY;
@ -2489,8 +2522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
err:
mutex_unlock(&adapter->mbox_lock);
pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
attribs_cmd.dma);
if (attribs_cmd.va)
pci_free_consistent(adapter->pdev, attribs_cmd.size,
attribs_cmd.va, attribs_cmd.dma);
return status;
}
@ -2788,6 +2822,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
CMD_SUBSYSTEM_ETH))
return -EPERM;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
@ -2795,12 +2832,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
if (!cmd.va) {
dev_err(&adapter->pdev->dev,
"Memory allocation failure\n");
return -ENOMEM;
status = -ENOMEM;
goto err;
}
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
if (!wrb) {
status = -EBUSY;
@ -2831,7 +2866,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
}
err:
mutex_unlock(&adapter->mbox_lock);
pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
if (cmd.va)
pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
return status;
}
@ -2964,16 +3000,18 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
int status;
struct be_dma_mem cmd;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
&cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
return -ENOMEM;
status = -ENOMEM;
goto err;
}
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
if (!wrb) {
@ -3016,8 +3054,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
}
err:
mutex_unlock(&adapter->mbox_lock);
pci_free_consistent(adapter->pdev, cmd.size,
cmd.va, cmd.dma);
if (cmd.va)
pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
return status;
}

View File

@ -84,6 +84,9 @@ struct be_mcc_compl {
#define ASYNC_EVENT_QOS_SPEED 0x1
#define ASYNC_EVENT_COS_PRIORITY 0x2
#define ASYNC_EVENT_PVID_STATE 0x3
#define ASYNC_EVENT_CODE_QNQ 0x6
#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
struct be_async_event_trailer {
u32 code;
};
@ -144,6 +147,16 @@ struct be_async_event_grp5_pvid_state {
struct be_async_event_trailer trailer;
} __packed;
/* async event indicating outer VLAN tag in QnQ */
struct be_async_event_qnq {
u8 valid; /* Indicates if outer VLAN is valid */
u8 rsvd0;
u16 vlan_tag;
u32 event_tag;
u8 rsvd1[4];
struct be_async_event_trailer trailer;
} __packed;
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;

View File

@ -680,7 +680,8 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (be_is_wol_supported(adapter)) {
wol->supported |= WAKE_MAGIC;
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol)
wol->wolopts |= WAKE_MAGIC;
} else
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));

View File

@ -639,13 +639,8 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}
static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
return vlan_tx_tag_present(skb) || adapter->pvid;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
u16 vlan_tag;
@ -672,8 +667,9 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
}
/* To skip HW VLAN tagging: evt = 1, compl = 0 */
AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
@ -696,7 +692,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
bool skip_hw_vlan)
{
dma_addr_t busaddr;
int i, copied = 0;
@ -745,7 +742,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
queue_head_inc(txq);
}
wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
be_dws_cpu_to_le(hdr, sizeof(*hdr));
return copied;
@ -762,7 +759,8 @@ dma_err:
}
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
struct sk_buff *skb)
struct sk_buff *skb,
bool *skip_hw_vlan)
{
u16 vlan_tag = 0;
@ -777,9 +775,67 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
skb->vlan_tci = 0;
}
if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
if (!vlan_tag)
vlan_tag = adapter->pvid;
if (skip_hw_vlan)
*skip_hw_vlan = true;
}
if (vlan_tag) {
skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
if (unlikely(!skb))
return skb;
skb->vlan_tci = 0;
}
/* Insert the outer VLAN, if any */
if (adapter->qnq_vid) {
vlan_tag = adapter->qnq_vid;
skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
if (unlikely(!skb))
return skb;
if (skip_hw_vlan)
*skip_hw_vlan = true;
}
return skb;
}
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
struct ethhdr *eh = (struct ethhdr *)skb->data;
u16 offset = ETH_HLEN;
if (eh->h_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
offset += sizeof(struct ipv6hdr);
if (ip6h->nexthdr != NEXTHDR_TCP &&
ip6h->nexthdr != NEXTHDR_UDP) {
struct ipv6_opt_hdr *ehdr =
(struct ipv6_opt_hdr *) (skb->data + offset);
/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
if (ehdr->hdrlen == 0xff)
return true;
}
}
return false;
}
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
return BE3_chip(adapter) &&
be_ipv6_exthdr_check(skb);
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@ -790,33 +846,64 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
u32 wrb_cnt = 0, copied = 0;
u32 start = txq->head, eth_hdr_len;
bool dummy_wrb, stopped = false;
bool skip_hw_vlan = false;
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN;
/* HW has a bug which considers padding bytes as legal
* and modifies the IPv4 hdr's 'tot_len' field
/* For padded packets, BE HW modifies tot_len field in IP header
* incorrecly when VLAN tag is inserted by HW.
*/
if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
is_ipv4_pkt(skb)) {
if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
}
/* If vlan tag is already inlined in the packet, skip HW VLAN
* tagging in UMC mode
*/
if ((adapter->function_mode & UMC_ENABLED) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
skip_hw_vlan = true;
/* HW has a bug wherein it will calculate CSUM for VLAN
* pkts even though it is disabled.
* Manually insert VLAN in pkt.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL &&
be_vlan_tag_chk(adapter, skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb);
vlan_tx_tag_present(skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
if (unlikely(!skb))
goto tx_drop;
}
/* HW may lockup when VLAN HW tagging is requested on
* certain ipv6 packets. Drop such pkts if the HW workaround to
* skip HW tagging is not enabled by FW.
*/
if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
(adapter->pvid || adapter->qnq_vid) &&
!qnq_async_evt_rcvd(adapter)))
goto tx_drop;
/* Manual VLAN tag insertion to prevent:
* ASIC lockup when the ASIC inserts VLAN tag into
* certain ipv6 packets. Insert VLAN tags in driver,
* and set event, completion, vlan bits accordingly
* in the Tx WRB.
*/
if (be_ipv6_tx_stall_chk(adapter, skb) &&
be_vlan_tag_tx_chk(adapter, skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
if (unlikely(!skb))
goto tx_drop;
}
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
skip_hw_vlan);
if (copied) {
int gso_segs = skb_shinfo(skb)->gso_segs;
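
Condensed, the VLAN handling these hunks add to be_xmit() works roughly as
follows; this is a simplified reconstruction of the new code path, not a
verbatim quote, with skb-NULL checks and drops abbreviated:

	bool skip_hw_vlan = false;

	/* UMC mode with an already-inlined tag: ask HW to skip tagging */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		skip_hw_vlan = true;

	/* HW CSUM bug: insert the VLAN tag in software for non-offloaded pkts */
	if (skb->ip_summed != CHECKSUM_PARTIAL && vlan_tx_tag_present(skb))
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);

	/* Certain ipv6 ext-header pkts can lock up BE3 when HW-tagged: drop
	 * them if the FW workaround (signalled by the QnQ async event) is not
	 * available, otherwise tag them in software as well.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) && !qnq_async_evt_rcvd(adapter))
		goto tx_drop;
	if (be_ipv6_tx_stall_chk(adapter, skb) && be_vlan_tag_tx_chk(adapter, skb))
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb, skip_hw_vlan);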

View File

@ -130,7 +130,6 @@ struct gianfar_ptp_registers {
#define DRIVER "gianfar_ptp"
#define DEFAULT_CKSEL 1
#define N_ALARM 1 /* first alarm is used internally to reset fipers */
#define N_EXT_TS 2
#define REG_SIZE sizeof(struct gianfar_ptp_registers)
@ -413,7 +412,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
.owner = THIS_MODULE,
.name = "gianfar clock",
.max_adj = 512000,
.n_alarm = N_ALARM,
.n_alarm = 0,
.n_ext_ts = N_EXT_TS,
.n_per_out = 0,
.pps = 1,

View File

@ -5896,6 +5896,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
goto err_stop_0;
}
/* 8168evl does not automatically pad to minimum length. */
if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
skb->len < ETH_ZLEN)) {
if (skb_padto(skb, ETH_ZLEN))
goto err_update_stats;
skb_put(skb, ETH_ZLEN - skb->len);
}
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;
@ -5967,6 +5975,7 @@ err_dma_1:
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
dev_kfree_skb(skb);
err_update_stats:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;

View File

@ -667,7 +667,7 @@ fail:
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities)
{
uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
size_t outlen, offset, i;
int port_num = efx_port_num(efx);
int rc;

View File

@ -1471,14 +1471,17 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!tun)
return -EBADFD;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
ret = -EINVAL;
goto out;
}
ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
}
out:
tun_put(tun);
return ret;
}
@ -1593,8 +1596,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return err;
if (tun->flags & TUN_TAP_MQ &&
(tun->numqueues + tun->numdisabled > 1))
return -EBUSY;
(tun->numqueues + tun->numdisabled > 1)) {
/* One or more queue has already been attached, no need
* to initialize the device again.
*/
return 0;
}
}
else {
char *name;

View File

@ -1253,7 +1253,7 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
goto out;
*uaddr_len = sizeof(struct sockaddr_at);
memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
memset(&sat, 0, sizeof(sat));
if (peer) {
err = -ENOTCONN;

View File

@ -939,7 +939,7 @@ out:
}
static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
struct sk_buff *skb) {
struct sk_buff *skb, int hdr_len) {
uint8_t curr_ttvn, old_ttvn;
struct batadv_orig_node *orig_node;
struct ethhdr *ethhdr;
@ -948,7 +948,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
int is_old_ttvn;
/* check if there is enough data before accessing it */
if (pskb_may_pull(skb, sizeof(*unicast_packet) + ETH_HLEN) < 0)
if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0)
return 0;
/* create a copy of the skb (in case of for re-routing) to modify it. */
@ -956,7 +956,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
return 0;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
ethhdr = (struct ethhdr *)(skb->data + hdr_len);
/* check if the destination client was served by this node and it is now
* roaming. In this case, it means that the node has got a ROAM_ADV
@ -1072,8 +1072,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
if (check < 0)
return NET_RX_DROP;
if (!batadv_check_unicast_ttvn(bat_priv, skb))
if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
return NET_RX_DROP;
/* packet for me */
@ -1117,7 +1116,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
return NET_RX_DROP;
if (!batadv_check_unicast_ttvn(bat_priv, skb))
if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
return NET_RX_DROP;
unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;

View File

@ -78,9 +78,10 @@ static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int syn
return autoremove_wake_function(wait, mode, sync, key);
}
/*
* Wait for a packet..
* Wait for the last received packet to be different from skb
*/
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
const struct sk_buff *skb)
{
int error;
DEFINE_WAIT_FUNC(wait, receiver_wake_function);
@ -92,7 +93,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
if (error)
goto out_err;
if (!skb_queue_empty(&sk->sk_receive_queue))
if (sk->sk_receive_queue.prev != skb)
goto out;
/* Socket shut down? */
@ -131,9 +132,9 @@ out_noerr:
* __skb_recv_datagram - Receive a datagram skbuff
* @sk: socket
* @flags: MSG_ flags
* @peeked: returns non-zero if this packet has been seen before
* @off: an offset in bytes to peek skb from. Returns an offset
* within an skb where data actually starts
* @peeked: returns non-zero if this packet has been seen before
* @err: error code returned
*
* Get a datagram skbuff, understands the peeking, nonblocking wakeups
@ -161,7 +162,7 @@ out_noerr:
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
int *peeked, int *off, int *err)
{
struct sk_buff *skb;
struct sk_buff *skb, *last;
long timeo;
/*
* Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@ -182,13 +183,17 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
*/
unsigned long cpu_flags;
struct sk_buff_head *queue = &sk->sk_receive_queue;
int _off = *off;
last = (struct sk_buff *)queue;
spin_lock_irqsave(&queue->lock, cpu_flags);
skb_queue_walk(queue, skb) {
last = skb;
*peeked = skb->peeked;
if (flags & MSG_PEEK) {
if (*off >= skb->len && skb->len) {
*off -= skb->len;
if (_off >= skb->len && (skb->len || _off ||
skb->peeked)) {
_off -= skb->len;
continue;
}
skb->peeked = 1;
@ -197,6 +202,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
__skb_unlink(skb, queue);
spin_unlock_irqrestore(&queue->lock, cpu_flags);
*off = _off;
return skb;
}
spin_unlock_irqrestore(&queue->lock, cpu_flags);
@ -206,7 +212,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
if (!timeo)
goto no_packet;
} while (!wait_for_packet(sk, err, &timeo));
} while (!wait_for_more_packets(sk, err, &timeo, last));
return NULL;

View File

@ -1927,6 +1927,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
skb_queue_len(&tp->ucopy.prequeue) == 0)
return false;
skb_dst_force(skb);
__skb_queue_tail(&tp->ucopy.prequeue, skb);
tp->ucopy.memory += skb->truesize;
if (tp->ucopy.memory > sk->sk_rcvbuf) {

View File

@ -88,7 +88,7 @@ struct mac802154_sub_if_data {
#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
#define MAC802154_CHAN_NONE 0xff /* No channel is assigned */
extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
extern struct ieee802154_mlme_ops mac802154_mlme_wpan;

View File

@ -38,14 +38,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
if (ret > 0)
break;
if (!ret)
return 0;
return -EINVAL;
dataoff += *matchoff;
}
/* Empty callid is useless */
if (!*matchlen)
return -EINVAL;
/* Too large is useless */
if (*matchlen > IP_VS_PEDATA_MAXLEN)
return -EINVAL;

View File

@ -834,6 +834,8 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
memset(&sax->fsa_ax25, 0, sizeof(struct sockaddr_ax25));
lock_sock(sk);
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED) {

View File

@ -508,12 +508,13 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
llcp_sock->dsap, llcp_sock->ssap);
uaddr->sa_family = AF_NFC;
memset(llcp_addr, 0, sizeof(*llcp_addr));
*len = sizeof(struct sockaddr_nfc_llcp);
llcp_addr->sa_family = AF_NFC;
llcp_addr->dev_idx = llcp_sock->dev->idx;
llcp_addr->target_idx = llcp_sock->target_idx;
llcp_addr->nfc_protocol = llcp_sock->nfc_protocol;
llcp_addr->dsap = llcp_sock->dsap;
llcp_addr->ssap = llcp_sock->ssap;
llcp_addr->service_name_len = llcp_sock->service_name_len;

View File

@ -1858,10 +1858,10 @@ out:
}
/*
* Sleep until data has arrive. But check for races..
* Sleep until more data has arrived. But check for races..
*/
static long unix_stream_data_wait(struct sock *sk, long timeo)
static long unix_stream_data_wait(struct sock *sk, long timeo,
struct sk_buff *last)
{
DEFINE_WAIT(wait);
@ -1870,7 +1870,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (!skb_queue_empty(&sk->sk_receive_queue) ||
if (skb_peek_tail(&sk->sk_receive_queue) != last ||
sk->sk_err ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) ||
@ -1889,8 +1889,6 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
return timeo;
}
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
@ -1935,14 +1933,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
skip = sk_peek_offset(sk, flags);
do {
int chunk;
struct sk_buff *skb;
struct sk_buff *skb, *last;
unix_state_lock(sk);
skb = skb_peek(&sk->sk_receive_queue);
last = skb = skb_peek(&sk->sk_receive_queue);
again:
if (skb == NULL) {
unix_sk(sk)->recursion_level = 0;
@ -1965,7 +1961,7 @@ again:
break;
mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo);
timeo = unix_stream_data_wait(sk, timeo, last);
if (signal_pending(current)
|| mutex_lock_interruptible(&u->readlock)) {
@ -1979,10 +1975,13 @@ again:
break;
}
if (skip >= skb->len) {
skip = sk_peek_offset(sk, flags);
while (skip >= skb->len) {
skip -= skb->len;
last = skb;
skb = skb_peek_next(skb, &sk->sk_receive_queue);
goto again;
if (!skb)
goto again;
}
unix_state_unlock(sk);

View File

@ -165,7 +165,7 @@ static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
static DEFINE_SPINLOCK(vsock_table_lock);
static __init void vsock_init_tables(void)
static void vsock_init_tables(void)
{
int i;
@ -1932,7 +1932,6 @@ static const struct file_operations vsock_device_ops = {
static struct miscdevice vsock_device = {
.name = "vsock",
.minor = MISC_DYNAMIC_MINOR,
.fops = &vsock_device_ops,
};
@ -1942,6 +1941,7 @@ static int __vsock_core_init(void)
vsock_init_tables();
vsock_device.minor = MISC_DYNAMIC_MINOR;
err = misc_register(&vsock_device);
if (err) {
pr_err("Failed to register misc device\n");