net: xen-netback: use API provided by xenbus module to map rings

The xenbus module provides xenbus_map_ring_valloc() and
xenbus_map_ring_vfree().  Use these to map the Tx and Rx ring pages
granted by the frontend.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
This commit is contained in:
David Vrabel 2011-09-29 16:53:31 +01:00 committed by Konrad Rzeszutek Wilk
parent 2d073846b8
commit c9d6369978
2 changed files with 22 additions and 69 deletions

View file

@@ -58,10 +58,6 @@ struct xenvif {
u8 fe_dev_addr[6];
/* Physical parameters of the comms window. */
grant_handle_t tx_shmem_handle;
grant_ref_t tx_shmem_ref;
grant_handle_t rx_shmem_handle;
grant_ref_t rx_shmem_ref;
unsigned int irq;
/* List of frontends to notify after a batch of frames sent. */
@@ -70,8 +66,6 @@ struct xenvif {
/* The shared rings and indexes. */
struct xen_netif_tx_back_ring tx;
struct xen_netif_rx_back_ring rx;
struct vm_struct *tx_comms_area;
struct vm_struct *rx_comms_area;
/* Frontend feature information. */
u8 can_sg:1;
@@ -106,6 +100,11 @@ struct xenvif {
wait_queue_head_t waiting_to_free;
};
/*
 * Walk from the vif's net_device up to its parent device, which is the
 * xenbus device this backend was probed against.  Needed so the xenbus
 * ring map/unmap helpers can be called from netback code.
 */
static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
	struct device *parent = vif->dev->dev.parent;

	return to_xenbus_device(parent);
}
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)

View file

@@ -1577,88 +1577,42 @@ static int xen_netbk_kthread(void *data)
/*
 * NOTE(review): this span is a rendered diff whose +/- markers were lost,
 * so it shows BOTH the pre-change and post-change bodies of this function
 * fused together.  It is not the real post-commit code; confirm against
 * the actual drivers/net/xen-netback source tree.
 */
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
/* Used only by the old (removed) path below. */
struct gnttab_unmap_grant_ref op;
/*
 * OLD path (removed by this commit): unmap each shared ring with a raw
 * GNTTABOP_unmap_grant_ref hypercall using the handle saved at map time,
 * then release the vm areas that were allocated for the mappings.
 */
if (vif->tx.sring) {
gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
GNTMAP_host_map, vif->tx_shmem_handle);
/* A failed hypercall here is unrecoverable, hence BUG(). */
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
BUG();
}
if (vif->rx.sring) {
gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
GNTMAP_host_map, vif->rx_shmem_handle);
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
BUG();
}
if (vif->rx_comms_area)
free_vm_area(vif->rx_comms_area);
if (vif->tx_comms_area)
free_vm_area(vif->tx_comms_area);
/*
 * NEW path (added by this commit): let the xenbus module unmap and free
 * each ring in one call; a non-NULL sring pointer indicates the ring was
 * successfully mapped earlier.
 */
if (vif->tx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
vif->tx.sring);
if (vif->rx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
vif->rx.sring);
}
int xen_netbk_map_frontend_rings(struct xenvif *vif,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref)
{
struct gnttab_map_grant_ref op;
void *addr;
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
int err = -ENOMEM;
vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
if (vif->tx_comms_area == NULL)
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
tx_ring_ref, &addr);
if (err)
goto err;
vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
if (vif->rx_comms_area == NULL)
goto err;
gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
GNTMAP_host_map, tx_ring_ref, vif->domid);
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
BUG();
if (op.status) {
netdev_warn(vif->dev,
"failed to map tx ring. err=%d status=%d\n",
err, op.status);
err = op.status;
goto err;
}
vif->tx_shmem_ref = tx_ring_ref;
vif->tx_shmem_handle = op.handle;
txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
txs = (struct xen_netif_tx_sring *)addr;
BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
GNTMAP_host_map, rx_ring_ref, vif->domid);
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
BUG();
if (op.status) {
netdev_warn(vif->dev,
"failed to map rx ring. err=%d status=%d\n",
err, op.status);
err = op.status;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
rx_ring_ref, &addr);
if (err)
goto err;
}
vif->rx_shmem_ref = rx_ring_ref;
vif->rx_shmem_handle = op.handle;
vif->rx_req_cons_peek = 0;
rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
rxs = (struct xen_netif_rx_sring *)addr;
BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
vif->rx_req_cons_peek = 0;
return 0;
err: