mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-10-31 18:09:19 +00:00
IEEE 1394 (FireWire) subsystem updates post v3.3:
- Some SBP-2 initiator fixes, side product from ongoing work on a target. - Reintroduction of an isochronous I/O feature of the older ieee1394 driver stack (flush buffer completions); it was evidently rarely used but not actually unused. Matching libraw1394 code is already available. - Be sure to prefix all kernel log messages with device name or card name, and other logging related cleanups. - Misc other small cleanups, among them a small API change that affects sound/firewire/ too. -----BEGIN PGP SIGNATURE----- Version: GnuPG v2.0.17 (GNU/Linux) iQIcBAABAgAGBQJPa5I1AAoJEHnzb7JUXXnQvUoQAMl9PhUk5ZFhWp0AOnQ4uLhI lEfRnUp94kGBdazBhxM9wtAwZRAeXUev/JyxwymMKSG40dMTbuxqRcs71v6a+ifd VqNctL0yUncrOw/92l+TG2t/hWttB4u+dTKYX2U5yza42+uUHWMZb7MzmV+qVYc8 H+NR71WLQM4wkWdX8LBxmdeAOm0X635cjKsC/5FX9dws7q1ebSoxs4q4iIaGR7W8 ETWx5lh/UVyR7c9T+VIr0jfQWdsm2IcmHr/+nldlesePZ1gRjIEi69ErEnGxTkGe NLPwt9lWuFXgWWHBON7C/rLmBA+NSys9lbvRAsPXrb3GpOKlde81c7U7Kr/kmEkh hB9oM2Qh0A/7sglvIZiDUP565lqOAbXSJzziG3+0XgOP2zsxukm5gSecF8qM8tHY IDwN05R9+nc26NA5TOfaRWx08n9SqTxq4V326oz9WMuK4bosCEfg4dvMwyMK/V3i AyipAl2YYIG/2JURMFcGSKbw33dBw3mRsS8XG3MXwzagUMw/8tSyZKQIwF9qO4si 69QV7+CJoEfbJiLJMZJnKrRjfU+ZVRNA/xFuHUmhpmvYIbN8iJVGpGZABfXBUcH0 c1+qX9zE4NEAUEylbgn5raYSY6otF51O8QJzQOn2HRddBQSDpEwhkOGVfZ7zcSLH sjAOn9qLIMHnrxUXxBDP =oWbr -----END PGP SIGNATURE----- Merge tag 'firewire-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394 Pull IEEE 1394 (FireWire) subsystem updates post v3.3 from Stefan Richter: - Some SBP-2 initiator fixes, side product from ongoing work on a target. - Reintroduction of an isochronous I/O feature of the older ieee1394 driver stack (flush buffer completions); it was evidently rarely used but not actually unused. Matching libraw1394 code is already available. - Be sure to prefix all kernel log messages with device name or card name, and other logging related cleanups. - Misc other small cleanups, among them a small API change that affects sound/firewire/ too. Clemens Ladisch is aware of it. * tag 'firewire-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394: (26 commits) firewire: allow explicit flushing of iso packet completions firewire: prevent dropping of completed iso packet header data firewire: ohci: factor out iso completion flushing code firewire: ohci: simplify iso header pointer arithmetic firewire: ohci: optimize control bit checks firewire: ohci: remove unused excess_bytes field firewire: ohci: copy_iso_headers(): make comment match the code firewire: cdev: fix IR multichannel event documentation firewire: ohci: fix too-early completion of IR multichannel buffers firewire: ohci: move runtime debug facility out of #ifdef firewire: tone down some diagnostic log messages firewire: sbp2: replace a GFP_ATOMIC allocation firewire: sbp2: Fix SCSI sense data mangling firewire: sbp2: Ignore SBP-2 targets on the local node firewire: sbp2: Take into account Unit_Unique_ID firewire: nosy: Use the macro DMA_BIT_MASK(). firewire: core: convert AR-req handler lock from _irqsave to _bh firewire: core: fix race at address_handler unregistration firewire: core: remove obsolete comment firewire: core: prefix log messages with card name ...
This commit is contained in:
commit
34699403e9
16 changed files with 478 additions and 325 deletions
|
@ -28,11 +28,6 @@ config FIREWIRE_OHCI
|
|||
To compile this driver as a module, say M here: The module will be
|
||||
called firewire-ohci.
|
||||
|
||||
config FIREWIRE_OHCI_DEBUG
|
||||
bool
|
||||
depends on FIREWIRE_OHCI
|
||||
default y
|
||||
|
||||
config FIREWIRE_SBP2
|
||||
tristate "Storage devices (SBP-2 protocol)"
|
||||
depends on FIREWIRE && SCSI
|
||||
|
|
|
@ -37,6 +37,22 @@
|
|||
|
||||
#include "core.h"
|
||||
|
||||
#define define_fw_printk_level(func, kern_level) \
|
||||
void func(const struct fw_card *card, const char *fmt, ...) \
|
||||
{ \
|
||||
struct va_format vaf; \
|
||||
va_list args; \
|
||||
\
|
||||
va_start(args, fmt); \
|
||||
vaf.fmt = fmt; \
|
||||
vaf.va = &args; \
|
||||
printk(kern_level KBUILD_MODNAME " %s: %pV", \
|
||||
dev_name(card->device), &vaf); \
|
||||
va_end(args); \
|
||||
}
|
||||
define_fw_printk_level(fw_err, KERN_ERR);
|
||||
define_fw_printk_level(fw_notice, KERN_NOTICE);
|
||||
|
||||
int fw_compute_block_crc(__be32 *block)
|
||||
{
|
||||
int length;
|
||||
|
@ -260,7 +276,7 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
|
|||
fw_iso_resource_manage(card, generation, 1ULL << 31,
|
||||
&channel, &bandwidth, true);
|
||||
if (channel != 31) {
|
||||
fw_notify("failed to allocate broadcast channel\n");
|
||||
fw_notice(card, "failed to allocate broadcast channel\n");
|
||||
return;
|
||||
}
|
||||
card->broadcast_channel_allocated = true;
|
||||
|
@ -343,14 +359,14 @@ static void bm_work(struct work_struct *work)
|
|||
|
||||
if (!card->irm_node->link_on) {
|
||||
new_root_id = local_id;
|
||||
fw_notify("%s, making local node (%02x) root.\n",
|
||||
fw_notice(card, "%s, making local node (%02x) root\n",
|
||||
"IRM has link off", new_root_id);
|
||||
goto pick_me;
|
||||
}
|
||||
|
||||
if (irm_is_1394_1995_only && !keep_this_irm) {
|
||||
new_root_id = local_id;
|
||||
fw_notify("%s, making local node (%02x) root.\n",
|
||||
fw_notice(card, "%s, making local node (%02x) root\n",
|
||||
"IRM is not 1394a compliant", new_root_id);
|
||||
goto pick_me;
|
||||
}
|
||||
|
@ -405,7 +421,7 @@ static void bm_work(struct work_struct *work)
|
|||
* root, and thus, IRM.
|
||||
*/
|
||||
new_root_id = local_id;
|
||||
fw_notify("%s, making local node (%02x) root.\n",
|
||||
fw_notice(card, "%s, making local node (%02x) root\n",
|
||||
"BM lock failed", new_root_id);
|
||||
goto pick_me;
|
||||
}
|
||||
|
@ -478,8 +494,8 @@ static void bm_work(struct work_struct *work)
|
|||
spin_unlock_irq(&card->lock);
|
||||
|
||||
if (do_reset) {
|
||||
fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
|
||||
card->index, new_root_id, gap_count);
|
||||
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
|
||||
new_root_id, gap_count);
|
||||
fw_send_phy_config(card, new_root_id, generation, gap_count);
|
||||
reset_bus(card, true);
|
||||
/* Will allocate broadcast channel after the reset. */
|
||||
|
@ -634,6 +650,11 @@ static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
|
|||
{
|
||||
}
|
||||
|
||||
static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static const struct fw_card_driver dummy_driver_template = {
|
||||
.read_phy_reg = dummy_read_phy_reg,
|
||||
.update_phy_reg = dummy_update_phy_reg,
|
||||
|
@ -646,6 +667,7 @@ static const struct fw_card_driver dummy_driver_template = {
|
|||
.set_iso_channels = dummy_set_iso_channels,
|
||||
.queue_iso = dummy_queue_iso,
|
||||
.flush_queue_iso = dummy_flush_queue_iso,
|
||||
.flush_iso_completions = dummy_flush_iso_completions,
|
||||
};
|
||||
|
||||
void fw_card_release(struct kref *kref)
|
||||
|
|
|
@ -51,7 +51,7 @@
|
|||
/*
|
||||
* ABI version history is documented in linux/firewire-cdev.h.
|
||||
*/
|
||||
#define FW_CDEV_KERNEL_VERSION 4
|
||||
#define FW_CDEV_KERNEL_VERSION 5
|
||||
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
|
||||
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
|
||||
|
||||
|
@ -389,7 +389,7 @@ static void queue_bus_reset_event(struct client *client)
|
|||
|
||||
e = kzalloc(sizeof(*e), GFP_KERNEL);
|
||||
if (e == NULL) {
|
||||
fw_notify("Out of memory when allocating event\n");
|
||||
fw_notice(client->device->card, "out of memory when allocating event\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -438,6 +438,7 @@ union ioctl_arg {
|
|||
struct fw_cdev_send_phy_packet send_phy_packet;
|
||||
struct fw_cdev_receive_phy_packets receive_phy_packets;
|
||||
struct fw_cdev_set_iso_channels set_iso_channels;
|
||||
struct fw_cdev_flush_iso flush_iso;
|
||||
};
|
||||
|
||||
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
|
||||
|
@ -691,7 +692,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
|
|||
r = kmalloc(sizeof(*r), GFP_ATOMIC);
|
||||
e = kmalloc(sizeof(*e), GFP_ATOMIC);
|
||||
if (r == NULL || e == NULL) {
|
||||
fw_notify("Out of memory when allocating event\n");
|
||||
fw_notice(card, "out of memory when allocating event\n");
|
||||
goto failed;
|
||||
}
|
||||
r->card = card;
|
||||
|
@ -928,7 +929,7 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
|
|||
|
||||
e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
|
||||
if (e == NULL) {
|
||||
fw_notify("Out of memory when allocating event\n");
|
||||
fw_notice(context->card, "out of memory when allocating event\n");
|
||||
return;
|
||||
}
|
||||
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
|
||||
|
@ -948,7 +949,7 @@ static void iso_mc_callback(struct fw_iso_context *context,
|
|||
|
||||
e = kmalloc(sizeof(*e), GFP_ATOMIC);
|
||||
if (e == NULL) {
|
||||
fw_notify("Out of memory when allocating event\n");
|
||||
fw_notice(context->card, "out of memory when allocating event\n");
|
||||
return;
|
||||
}
|
||||
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
|
||||
|
@ -1168,6 +1169,16 @@ static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
|
|||
return fw_iso_context_stop(client->iso_context);
|
||||
}
|
||||
|
||||
static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
|
||||
{
|
||||
struct fw_cdev_flush_iso *a = &arg->flush_iso;
|
||||
|
||||
if (client->iso_context == NULL || a->handle != 0)
|
||||
return -EINVAL;
|
||||
|
||||
return fw_iso_context_flush_completions(client->iso_context);
|
||||
}
|
||||
|
||||
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
|
||||
{
|
||||
struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
|
||||
|
@ -1548,7 +1559,7 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
|
|||
list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
|
||||
e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
|
||||
if (e == NULL) {
|
||||
fw_notify("Out of memory when allocating event\n");
|
||||
fw_notice(card, "out of memory when allocating event\n");
|
||||
break;
|
||||
}
|
||||
e->phy_packet.closure = client->phy_receiver_closure;
|
||||
|
@ -1589,6 +1600,7 @@ static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
|
|||
[0x15] = ioctl_send_phy_packet,
|
||||
[0x16] = ioctl_receive_phy_packets,
|
||||
[0x17] = ioctl_set_iso_channels,
|
||||
[0x18] = ioctl_flush_iso,
|
||||
};
|
||||
|
||||
static int dispatch_ioctl(struct client *client,
|
||||
|
|
|
@ -485,6 +485,7 @@ static int read_rom(struct fw_device *device,
|
|||
*/
|
||||
static int read_config_rom(struct fw_device *device, int generation)
|
||||
{
|
||||
struct fw_card *card = device->card;
|
||||
const u32 *old_rom, *new_rom;
|
||||
u32 *rom, *stack;
|
||||
u32 sp, key;
|
||||
|
@ -529,12 +530,12 @@ static int read_config_rom(struct fw_device *device, int generation)
|
|||
*/
|
||||
if ((rom[2] & 0x7) < device->max_speed ||
|
||||
device->max_speed == SCODE_BETA ||
|
||||
device->card->beta_repeaters_present) {
|
||||
card->beta_repeaters_present) {
|
||||
u32 dummy;
|
||||
|
||||
/* for S1600 and S3200 */
|
||||
if (device->max_speed == SCODE_BETA)
|
||||
device->max_speed = device->card->link_speed;
|
||||
device->max_speed = card->link_speed;
|
||||
|
||||
while (device->max_speed > SCODE_100) {
|
||||
if (read_rom(device, generation, 0, &dummy) ==
|
||||
|
@ -576,9 +577,9 @@ static int read_config_rom(struct fw_device *device, int generation)
|
|||
* a firmware bug. Ignore this whole block, i.e.
|
||||
* simply set a fake block length of 0.
|
||||
*/
|
||||
fw_error("skipped invalid ROM block %x at %llx\n",
|
||||
rom[i],
|
||||
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
|
||||
fw_err(card, "skipped invalid ROM block %x at %llx\n",
|
||||
rom[i],
|
||||
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
|
||||
rom[i] = 0;
|
||||
end = i;
|
||||
}
|
||||
|
@ -604,9 +605,10 @@ static int read_config_rom(struct fw_device *device, int generation)
|
|||
* the ROM don't have to check offsets all the time.
|
||||
*/
|
||||
if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
|
||||
fw_error("skipped unsupported ROM entry %x at %llx\n",
|
||||
rom[i],
|
||||
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
|
||||
fw_err(card,
|
||||
"skipped unsupported ROM entry %x at %llx\n",
|
||||
rom[i],
|
||||
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
|
||||
rom[i] = 0;
|
||||
continue;
|
||||
}
|
||||
|
@ -641,6 +643,7 @@ static void fw_unit_release(struct device *dev)
|
|||
{
|
||||
struct fw_unit *unit = fw_unit(dev);
|
||||
|
||||
fw_device_put(fw_parent_device(unit));
|
||||
kfree(unit);
|
||||
}
|
||||
|
||||
|
@ -672,7 +675,7 @@ static void create_units(struct fw_device *device)
|
|||
*/
|
||||
unit = kzalloc(sizeof(*unit), GFP_KERNEL);
|
||||
if (unit == NULL) {
|
||||
fw_error("failed to allocate memory for unit\n");
|
||||
fw_err(device->card, "out of memory for unit\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -692,6 +695,7 @@ static void create_units(struct fw_device *device)
|
|||
if (device_register(&unit->device) < 0)
|
||||
goto skip_unit;
|
||||
|
||||
fw_device_get(device);
|
||||
continue;
|
||||
|
||||
skip_unit:
|
||||
|
@ -873,7 +877,7 @@ static int lookup_existing_device(struct device *dev, void *data)
|
|||
smp_wmb(); /* update node_id before generation */
|
||||
old->generation = card->generation;
|
||||
old->config_rom_retries = 0;
|
||||
fw_notify("rediscovered device %s\n", dev_name(dev));
|
||||
fw_notice(card, "rediscovered device %s\n", dev_name(dev));
|
||||
|
||||
PREPARE_DELAYED_WORK(&old->work, fw_device_update);
|
||||
fw_schedule_device_work(old, 0);
|
||||
|
@ -954,6 +958,7 @@ static void fw_device_init(struct work_struct *work)
|
|||
{
|
||||
struct fw_device *device =
|
||||
container_of(work, struct fw_device, work.work);
|
||||
struct fw_card *card = device->card;
|
||||
struct device *revived_dev;
|
||||
int minor, ret;
|
||||
|
||||
|
@ -970,16 +975,16 @@ static void fw_device_init(struct work_struct *work)
|
|||
fw_schedule_device_work(device, RETRY_DELAY);
|
||||
} else {
|
||||
if (device->node->link_on)
|
||||
fw_notify("giving up on config rom for node id %x\n",
|
||||
fw_notice(card, "giving up on Config ROM for node id %x\n",
|
||||
device->node_id);
|
||||
if (device->node == device->card->root_node)
|
||||
fw_schedule_bm_work(device->card, 0);
|
||||
if (device->node == card->root_node)
|
||||
fw_schedule_bm_work(card, 0);
|
||||
fw_device_release(&device->device);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
revived_dev = device_find_child(device->card->device,
|
||||
revived_dev = device_find_child(card->device,
|
||||
device, lookup_existing_device);
|
||||
if (revived_dev) {
|
||||
put_device(revived_dev);
|
||||
|
@ -1002,7 +1007,7 @@ static void fw_device_init(struct work_struct *work)
|
|||
|
||||
device->device.bus = &fw_bus_type;
|
||||
device->device.type = &fw_device_type;
|
||||
device->device.parent = device->card->device;
|
||||
device->device.parent = card->device;
|
||||
device->device.devt = MKDEV(fw_cdev_major, minor);
|
||||
dev_set_name(&device->device, "fw%d", minor);
|
||||
|
||||
|
@ -1014,7 +1019,7 @@ static void fw_device_init(struct work_struct *work)
|
|||
&device->attribute_group);
|
||||
|
||||
if (device_add(&device->device)) {
|
||||
fw_error("Failed to add device.\n");
|
||||
fw_err(card, "failed to add device\n");
|
||||
goto error_with_cdev;
|
||||
}
|
||||
|
||||
|
@ -1035,18 +1040,10 @@ static void fw_device_init(struct work_struct *work)
|
|||
PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
|
||||
fw_schedule_device_work(device, SHUTDOWN_DELAY);
|
||||
} else {
|
||||
if (device->config_rom_retries)
|
||||
fw_notify("created device %s: GUID %08x%08x, S%d00, "
|
||||
"%d config ROM retries\n",
|
||||
dev_name(&device->device),
|
||||
device->config_rom[3], device->config_rom[4],
|
||||
1 << device->max_speed,
|
||||
device->config_rom_retries);
|
||||
else
|
||||
fw_notify("created device %s: GUID %08x%08x, S%d00\n",
|
||||
dev_name(&device->device),
|
||||
device->config_rom[3], device->config_rom[4],
|
||||
1 << device->max_speed);
|
||||
fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
|
||||
dev_name(&device->device),
|
||||
device->config_rom[3], device->config_rom[4],
|
||||
1 << device->max_speed);
|
||||
device->config_rom_retries = 0;
|
||||
|
||||
set_broadcast_channel(device, device->generation);
|
||||
|
@ -1058,8 +1055,8 @@ static void fw_device_init(struct work_struct *work)
|
|||
* just end up running the IRM work a couple of extra times -
|
||||
* pretty harmless.
|
||||
*/
|
||||
if (device->node == device->card->root_node)
|
||||
fw_schedule_bm_work(device->card, 0);
|
||||
if (device->node == card->root_node)
|
||||
fw_schedule_bm_work(card, 0);
|
||||
|
||||
return;
|
||||
|
||||
|
@ -1163,12 +1160,13 @@ static void fw_device_refresh(struct work_struct *work)
|
|||
FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
|
||||
goto gone;
|
||||
|
||||
fw_notify("refreshed device %s\n", dev_name(&device->device));
|
||||
fw_notice(card, "refreshed device %s\n", dev_name(&device->device));
|
||||
device->config_rom_retries = 0;
|
||||
goto out;
|
||||
|
||||
give_up:
|
||||
fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
|
||||
fw_notice(card, "giving up on refresh of device %s\n",
|
||||
dev_name(&device->device));
|
||||
gone:
|
||||
atomic_set(&device->state, FW_DEVICE_GONE);
|
||||
PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
|
||||
|
|
|
@ -192,6 +192,12 @@ void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
|
|||
}
|
||||
EXPORT_SYMBOL(fw_iso_context_queue_flush);
|
||||
|
||||
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
|
||||
{
|
||||
return ctx->card->driver->flush_iso_completions(ctx);
|
||||
}
|
||||
EXPORT_SYMBOL(fw_iso_context_flush_completions);
|
||||
|
||||
int fw_iso_context_stop(struct fw_iso_context *ctx)
|
||||
{
|
||||
return ctx->card->driver->stop_iso(ctx);
|
||||
|
|
|
@ -205,19 +205,19 @@ static struct fw_node *build_tree(struct fw_card *card,
|
|||
next_sid = count_ports(sid, &port_count, &child_port_count);
|
||||
|
||||
if (next_sid == NULL) {
|
||||
fw_error("Inconsistent extended self IDs.\n");
|
||||
fw_err(card, "inconsistent extended self IDs\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
q = *sid;
|
||||
if (phy_id != SELF_ID_PHY_ID(q)) {
|
||||
fw_error("PHY ID mismatch in self ID: %d != %d.\n",
|
||||
phy_id, SELF_ID_PHY_ID(q));
|
||||
fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
|
||||
phy_id, SELF_ID_PHY_ID(q));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (child_port_count > stack_depth) {
|
||||
fw_error("Topology stack underflow\n");
|
||||
fw_err(card, "topology stack underflow\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -235,7 +235,7 @@ static struct fw_node *build_tree(struct fw_card *card,
|
|||
|
||||
node = fw_node_create(q, port_count, card->color);
|
||||
if (node == NULL) {
|
||||
fw_error("Out of memory while building topology.\n");
|
||||
fw_err(card, "out of memory while building topology\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -284,8 +284,8 @@ static struct fw_node *build_tree(struct fw_card *card,
|
|||
*/
|
||||
if ((next_sid == end && parent_count != 0) ||
|
||||
(next_sid < end && parent_count != 1)) {
|
||||
fw_error("Parent port inconsistency for node %d: "
|
||||
"parent_count=%d\n", phy_id, parent_count);
|
||||
fw_err(card, "parent port inconsistency for node %d: "
|
||||
"parent_count=%d\n", phy_id, parent_count);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -530,7 +530,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
|
|||
*/
|
||||
if (!is_next_generation(generation, card->generation) &&
|
||||
card->local_node != NULL) {
|
||||
fw_notify("skipped bus generations, destroying all nodes\n");
|
||||
fw_destroy_nodes(card);
|
||||
card->bm_retries = 0;
|
||||
}
|
||||
|
@ -557,7 +556,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
|
|||
card->color++;
|
||||
|
||||
if (local_node == NULL) {
|
||||
fw_error("topology build failed\n");
|
||||
fw_err(card, "topology build failed\n");
|
||||
/* FIXME: We need to issue a bus reset in this case. */
|
||||
} else if (card->local_node == NULL) {
|
||||
card->local_node = local_node;
|
||||
|
|
|
@ -565,7 +565,6 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
|
|||
const struct fw_address_region *region)
|
||||
{
|
||||
struct fw_address_handler *other;
|
||||
unsigned long flags;
|
||||
int ret = -EBUSY;
|
||||
|
||||
if (region->start & 0xffff000000000003ULL ||
|
||||
|
@ -575,7 +574,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
|
|||
handler->length == 0)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&address_handler_lock, flags);
|
||||
spin_lock_bh(&address_handler_lock);
|
||||
|
||||
handler->offset = region->start;
|
||||
while (handler->offset + handler->length <= region->end) {
|
||||
|
@ -594,7 +593,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
|
|||
}
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&address_handler_lock, flags);
|
||||
spin_unlock_bh(&address_handler_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -602,14 +601,15 @@ EXPORT_SYMBOL(fw_core_add_address_handler);
|
|||
|
||||
/**
|
||||
* fw_core_remove_address_handler() - unregister an address handler
|
||||
*
|
||||
* When fw_core_remove_address_handler() returns, @handler->callback() is
|
||||
* guaranteed to not run on any CPU anymore.
|
||||
*/
|
||||
void fw_core_remove_address_handler(struct fw_address_handler *handler)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&address_handler_lock, flags);
|
||||
spin_lock_bh(&address_handler_lock);
|
||||
list_del(&handler->link);
|
||||
spin_unlock_irqrestore(&address_handler_lock, flags);
|
||||
spin_unlock_bh(&address_handler_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(fw_core_remove_address_handler);
|
||||
|
||||
|
@ -770,7 +770,7 @@ static struct fw_request *allocate_request(struct fw_card *card,
|
|||
break;
|
||||
|
||||
default:
|
||||
fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
|
||||
fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
|
||||
p->header[0], p->header[1], p->header[2]);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -826,7 +826,6 @@ static void handle_exclusive_region_request(struct fw_card *card,
|
|||
unsigned long long offset)
|
||||
{
|
||||
struct fw_address_handler *handler;
|
||||
unsigned long flags;
|
||||
int tcode, destination, source;
|
||||
|
||||
destination = HEADER_GET_DESTINATION(p->header[0]);
|
||||
|
@ -835,27 +834,19 @@ static void handle_exclusive_region_request(struct fw_card *card,
|
|||
if (tcode == TCODE_LOCK_REQUEST)
|
||||
tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);
|
||||
|
||||
spin_lock_irqsave(&address_handler_lock, flags);
|
||||
spin_lock_bh(&address_handler_lock);
|
||||
handler = lookup_enclosing_address_handler(&address_handler_list,
|
||||
offset, request->length);
|
||||
spin_unlock_irqrestore(&address_handler_lock, flags);
|
||||
|
||||
/*
|
||||
* FIXME: lookup the fw_node corresponding to the sender of
|
||||
* this request and pass that to the address handler instead
|
||||
* of the node ID. We may also want to move the address
|
||||
* allocations to fw_node so we only do this callback if the
|
||||
* upper layers registered it for this node.
|
||||
*/
|
||||
|
||||
if (handler == NULL)
|
||||
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
|
||||
else
|
||||
if (handler)
|
||||
handler->address_callback(card, request,
|
||||
tcode, destination, source,
|
||||
p->generation, offset,
|
||||
request->data, request->length,
|
||||
handler->callback_data);
|
||||
spin_unlock_bh(&address_handler_lock);
|
||||
|
||||
if (!handler)
|
||||
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
|
||||
}
|
||||
|
||||
static void handle_fcp_region_request(struct fw_card *card,
|
||||
|
@ -864,7 +855,6 @@ static void handle_fcp_region_request(struct fw_card *card,
|
|||
unsigned long long offset)
|
||||
{
|
||||
struct fw_address_handler *handler;
|
||||
unsigned long flags;
|
||||
int tcode, destination, source;
|
||||
|
||||
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
|
||||
|
@ -886,7 +876,7 @@ static void handle_fcp_region_request(struct fw_card *card,
|
|||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&address_handler_lock, flags);
|
||||
spin_lock_bh(&address_handler_lock);
|
||||
list_for_each_entry(handler, &address_handler_list, link) {
|
||||
if (is_enclosing_handler(handler, offset, request->length))
|
||||
handler->address_callback(card, NULL, tcode,
|
||||
|
@ -896,7 +886,7 @@ static void handle_fcp_region_request(struct fw_card *card,
|
|||
request->length,
|
||||
handler->callback_data);
|
||||
}
|
||||
spin_unlock_irqrestore(&address_handler_lock, flags);
|
||||
spin_unlock_bh(&address_handler_lock);
|
||||
|
||||
fw_send_response(card, request, RCODE_COMPLETE);
|
||||
}
|
||||
|
@ -960,7 +950,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
|
|||
|
||||
if (&t->link == &card->transaction_list) {
|
||||
timed_out:
|
||||
fw_notify("Unsolicited response (source %x, tlabel %x)\n",
|
||||
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
|
||||
source, tlabel);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
#ifndef _FIREWIRE_CORE_H
|
||||
#define _FIREWIRE_CORE_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/idr.h>
|
||||
|
@ -23,6 +25,11 @@ struct fw_packet;
|
|||
|
||||
/* -card */
|
||||
|
||||
extern __printf(2, 3)
|
||||
void fw_err(const struct fw_card *card, const char *fmt, ...);
|
||||
extern __printf(2, 3)
|
||||
void fw_notice(const struct fw_card *card, const char *fmt, ...);
|
||||
|
||||
/* bitfields within the PHY registers */
|
||||
#define PHY_LINK_ACTIVE 0x80
|
||||
#define PHY_CONTENDER 0x40
|
||||
|
@ -99,6 +106,8 @@ struct fw_card_driver {
|
|||
|
||||
void (*flush_queue_iso)(struct fw_iso_context *ctx);
|
||||
|
||||
int (*flush_iso_completions)(struct fw_iso_context *ctx);
|
||||
|
||||
int (*stop_iso)(struct fw_iso_context *ctx);
|
||||
};
|
||||
|
||||
|
@ -141,6 +150,18 @@ extern struct rw_semaphore fw_device_rwsem;
|
|||
extern struct idr fw_device_idr;
|
||||
extern int fw_cdev_major;
|
||||
|
||||
static inline struct fw_device *fw_device_get(struct fw_device *device)
|
||||
{
|
||||
get_device(&device->device);
|
||||
|
||||
return device;
|
||||
}
|
||||
|
||||
static inline void fw_device_put(struct fw_device *device)
|
||||
{
|
||||
put_device(&device->device);
|
||||
}
|
||||
|
||||
struct fw_device *fw_device_get_by_devt(dev_t devt);
|
||||
int fw_device_set_broadcast_channel(struct device *dev, void *gen);
|
||||
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
|
||||
|
|
|
@ -256,8 +256,8 @@ static int fwnet_header_rebuild(struct sk_buff *skb)
|
|||
if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
|
||||
return arp_find((unsigned char *)&h->h_dest, skb);
|
||||
|
||||
fw_notify("%s: unable to resolve type %04x addresses\n",
|
||||
skb->dev->name, be16_to_cpu(h->h_proto));
|
||||
dev_notice(&skb->dev->dev, "unable to resolve type %04x addresses\n",
|
||||
be16_to_cpu(h->h_proto));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -369,7 +369,7 @@ static struct fwnet_fragment_info *fwnet_frag_new(
|
|||
|
||||
new = kmalloc(sizeof(*new), GFP_ATOMIC);
|
||||
if (!new) {
|
||||
fw_error("out of memory\n");
|
||||
dev_err(&pd->skb->dev->dev, "out of memory\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -414,7 +414,7 @@ fail_w_fi:
|
|||
fail_w_new:
|
||||
kfree(new);
|
||||
fail:
|
||||
fw_error("out of memory\n");
|
||||
dev_err(&net->dev, "out of memory\n");
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
@ -554,7 +554,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
|
|||
sspd = arp1394->sspd;
|
||||
/* Sanity check. OS X 10.3 PPC reportedly sends 131. */
|
||||
if (sspd > SCODE_3200) {
|
||||
fw_notify("sspd %x out of range\n", sspd);
|
||||
dev_notice(&net->dev, "sspd %x out of range\n", sspd);
|
||||
sspd = SCODE_3200;
|
||||
}
|
||||
max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
|
||||
|
@ -574,8 +574,9 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
|
|||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
|
||||
if (!peer) {
|
||||
fw_notify("No peer for ARP packet from %016llx\n",
|
||||
(unsigned long long)peer_guid);
|
||||
dev_notice(&net->dev,
|
||||
"no peer for ARP packet from %016llx\n",
|
||||
(unsigned long long)peer_guid);
|
||||
goto no_peer;
|
||||
}
|
||||
|
||||
|
@ -691,7 +692,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
|
|||
|
||||
skb = dev_alloc_skb(len + net->hard_header_len + 15);
|
||||
if (unlikely(!skb)) {
|
||||
fw_error("out of memory\n");
|
||||
dev_err(&net->dev, "out of memory\n");
|
||||
net->stats.rx_dropped++;
|
||||
|
||||
return -ENOMEM;
|
||||
|
@ -814,7 +815,7 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
|
|||
rcode = RCODE_TYPE_ERROR;
|
||||
else if (fwnet_incoming_packet(dev, payload, length,
|
||||
source, generation, false) != 0) {
|
||||
fw_error("Incoming packet failure\n");
|
||||
dev_err(&dev->netdev->dev, "incoming packet failure\n");
|
||||
rcode = RCODE_CONFLICT_ERROR;
|
||||
} else
|
||||
rcode = RCODE_COMPLETE;
|
||||
|
@ -881,7 +882,7 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
|
|||
if (retval >= 0)
|
||||
fw_iso_context_queue_flush(dev->broadcast_rcv_context);
|
||||
else
|
||||
fw_error("requeue failed\n");
|
||||
dev_err(&dev->netdev->dev, "requeue failed\n");
|
||||
}
|
||||
|
||||
static struct kmem_cache *fwnet_packet_task_cache;
|
||||
|
@ -936,9 +937,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
|
|||
case RFC2374_HDR_LASTFRAG:
|
||||
case RFC2374_HDR_UNFRAG:
|
||||
default:
|
||||
fw_error("Outstanding packet %x lf %x, header %x,%x\n",
|
||||
ptask->outstanding_pkts, lf, ptask->hdr.w0,
|
||||
ptask->hdr.w1);
|
||||
dev_err(&dev->netdev->dev,
|
||||
"outstanding packet %x lf %x, header %x,%x\n",
|
||||
ptask->outstanding_pkts, lf, ptask->hdr.w0,
|
||||
ptask->hdr.w1);
|
||||
BUG();
|
||||
|
||||
case RFC2374_HDR_FIRSTFRAG:
|
||||
|
@ -1010,8 +1012,9 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
|
|||
fwnet_transmit_packet_failed(ptask);
|
||||
|
||||
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
|
||||
fw_error("fwnet_write_complete: "
|
||||
"failed: %x (skipped %d)\n", rcode, errors_skipped);
|
||||
dev_err(&ptask->dev->netdev->dev,
|
||||
"fwnet_write_complete failed: %x (skipped %d)\n",
|
||||
rcode, errors_skipped);
|
||||
|
||||
errors_skipped = 0;
|
||||
last_rcode = rcode;
|
||||
|
@ -1539,14 +1542,12 @@ static int fwnet_probe(struct device *_dev)
|
|||
put_unaligned_be64(card->guid, net->dev_addr);
|
||||
put_unaligned_be64(~0ULL, net->broadcast);
|
||||
ret = register_netdev(net);
|
||||
if (ret) {
|
||||
fw_error("Cannot register the driver\n");
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_add_tail(&dev->dev_link, &fwnet_device_list);
|
||||
fw_notify("%s: IPv4 over FireWire on device %016llx\n",
|
||||
net->name, (unsigned long long)card->guid);
|
||||
dev_notice(&net->dev, "IPv4 over IEEE 1394 on card %s\n",
|
||||
dev_name(card->device));
|
||||
have_dev:
|
||||
ret = fwnet_add_peer(dev, unit, device);
|
||||
if (ret && allocated_netdev) {
|
||||
|
@ -1648,7 +1649,7 @@ static const struct ieee1394_device_id fwnet_id_table[] = {
|
|||
static struct fw_driver fwnet_driver = {
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "net",
|
||||
.name = KBUILD_MODNAME,
|
||||
.bus = &fw_bus_type,
|
||||
.probe = fwnet_probe,
|
||||
.remove = fwnet_remove,
|
||||
|
|
|
@ -36,7 +36,7 @@
|
|||
#include <linux/timex.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
|
@ -536,7 +536,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
|
|||
u32 p, end;
|
||||
int ret, i;
|
||||
|
||||
if (pci_set_dma_mask(dev, 0xffffffff)) {
|
||||
if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
|
||||
dev_err(&dev->dev,
|
||||
"DMA address limits not supported for PCILynx hardware\n");
|
||||
return -ENXIO;
|
||||
|
|
|
@ -170,10 +170,12 @@ struct context {
|
|||
struct iso_context {
|
||||
struct fw_iso_context base;
|
||||
struct context context;
|
||||
int excess_bytes;
|
||||
void *header;
|
||||
size_t header_length;
|
||||
|
||||
unsigned long flushing_completions;
|
||||
u32 mc_buffer_bus;
|
||||
u16 mc_completed;
|
||||
u16 last_timestamp;
|
||||
u8 sync;
|
||||
u8 tags;
|
||||
};
|
||||
|
@ -338,8 +340,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
|
|||
#define OHCI_PARAM_DEBUG_IRQS 4
|
||||
#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
|
||||
|
||||
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
|
||||
|
||||
static int param_debug;
|
||||
module_param_named(debug, param_debug, int, 0644);
|
||||
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
|
||||
|
@ -349,7 +349,7 @@ MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
|
|||
", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
|
||||
", or a combination, or all = -1)");
|
||||
|
||||
static void log_irqs(u32 evt)
|
||||
static void log_irqs(struct fw_ohci *ohci, u32 evt)
|
||||
{
|
||||
if (likely(!(param_debug &
|
||||
(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
|
||||
|
@ -359,7 +359,8 @@ static void log_irqs(u32 evt)
|
|||
!(evt & OHCI1394_busReset))
|
||||
return;
|
||||
|
||||
fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
|
||||
dev_notice(ohci->card.device,
|
||||
"IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
|
||||
evt & OHCI1394_selfIDComplete ? " selfID" : "",
|
||||
evt & OHCI1394_RQPkt ? " AR_req" : "",
|
||||
evt & OHCI1394_RSPkt ? " AR_resp" : "",
|
||||
|
@ -398,24 +399,29 @@ static char _p(u32 *s, int shift)
|
|||
return port[*s >> shift & 3];
|
||||
}
|
||||
|
||||
static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
|
||||
static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
|
||||
{
|
||||
u32 *s;
|
||||
|
||||
if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
|
||||
return;
|
||||
|
||||
fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
|
||||
self_id_count, generation, node_id);
|
||||
dev_notice(ohci->card.device,
|
||||
"%d selfIDs, generation %d, local node ID %04x\n",
|
||||
self_id_count, generation, ohci->node_id);
|
||||
|
||||
for (; self_id_count--; ++s)
|
||||
for (s = ohci->self_id_buffer; self_id_count--; ++s)
|
||||
if ((*s & 1 << 23) == 0)
|
||||
fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
|
||||
dev_notice(ohci->card.device,
|
||||
"selfID 0: %08x, phy %d [%c%c%c] "
|
||||
"%s gc=%d %s %s%s%s\n",
|
||||
*s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
|
||||
speed[*s >> 14 & 3], *s >> 16 & 63,
|
||||
power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
|
||||
*s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
|
||||
else
|
||||
fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
|
||||
dev_notice(ohci->card.device,
|
||||
"selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
|
||||
*s, *s >> 24 & 63,
|
||||
_p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
|
||||
_p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
|
||||
|
@ -451,7 +457,8 @@ static const char *tcodes[] = {
|
|||
[0xe] = "link internal", [0xf] = "-reserved-",
|
||||
};
|
||||
|
||||
static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
|
||||
static void log_ar_at_event(struct fw_ohci *ohci,
|
||||
char dir, int speed, u32 *header, int evt)
|
||||
{
|
||||
int tcode = header[0] >> 4 & 0xf;
|
||||
char specific[12];
|
||||
|
@ -463,8 +470,9 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
|
|||
evt = 0x1f;
|
||||
|
||||
if (evt == OHCI1394_evt_bus_reset) {
|
||||
fw_notify("A%c evt_bus_reset, generation %d\n",
|
||||
dir, (header[2] >> 16) & 0xff);
|
||||
dev_notice(ohci->card.device,
|
||||
"A%c evt_bus_reset, generation %d\n",
|
||||
dir, (header[2] >> 16) & 0xff);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -483,39 +491,35 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
|
|||
|
||||
switch (tcode) {
|
||||
case 0xa:
|
||||
fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
|
||||
dev_notice(ohci->card.device,
|
||||
"A%c %s, %s\n",
|
||||
dir, evts[evt], tcodes[tcode]);
|
||||
break;
|
||||
case 0xe:
|
||||
fw_notify("A%c %s, PHY %08x %08x\n",
|
||||
dir, evts[evt], header[1], header[2]);
|
||||
dev_notice(ohci->card.device,
|
||||
"A%c %s, PHY %08x %08x\n",
|
||||
dir, evts[evt], header[1], header[2]);
|
||||
break;
|
||||
case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
|
||||
fw_notify("A%c spd %x tl %02x, "
|
||||
"%04x -> %04x, %s, "
|
||||
"%s, %04x%08x%s\n",
|
||||
dir, speed, header[0] >> 10 & 0x3f,
|
||||
header[1] >> 16, header[0] >> 16, evts[evt],
|
||||
tcodes[tcode], header[1] & 0xffff, header[2], specific);
|
||||
dev_notice(ohci->card.device,
|
||||
"A%c spd %x tl %02x, "
|
||||
"%04x -> %04x, %s, "
|
||||
"%s, %04x%08x%s\n",
|
||||
dir, speed, header[0] >> 10 & 0x3f,
|
||||
header[1] >> 16, header[0] >> 16, evts[evt],
|
||||
tcodes[tcode], header[1] & 0xffff, header[2], specific);
|
||||
break;
|
||||
default:
|
||||
fw_notify("A%c spd %x tl %02x, "
|
||||
"%04x -> %04x, %s, "
|
||||
"%s%s\n",
|
||||
dir, speed, header[0] >> 10 & 0x3f,
|
||||
header[1] >> 16, header[0] >> 16, evts[evt],
|
||||
tcodes[tcode], specific);
|
||||
dev_notice(ohci->card.device,
|
||||
"A%c spd %x tl %02x, "
|
||||
"%04x -> %04x, %s, "
|
||||
"%s%s\n",
|
||||
dir, speed, header[0] >> 10 & 0x3f,
|
||||
header[1] >> 16, header[0] >> 16, evts[evt],
|
||||
tcodes[tcode], specific);
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#define param_debug 0
|
||||
static inline void log_irqs(u32 evt) {}
|
||||
static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
|
||||
static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
|
||||
|
||||
#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
|
||||
|
||||
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
|
||||
{
|
||||
writel(data, ohci->registers + offset);
|
||||
|
@ -559,7 +563,7 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr)
|
|||
if (i >= 3)
|
||||
msleep(1);
|
||||
}
|
||||
fw_error("failed to read phy reg\n");
|
||||
dev_err(ohci->card.device, "failed to read phy reg\n");
|
||||
|
||||
return -EBUSY;
|
||||
}
|
||||
|
@ -581,7 +585,7 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
|
|||
if (i >= 3)
|
||||
msleep(1);
|
||||
}
|
||||
fw_error("failed to write phy reg\n");
|
||||
dev_err(ohci->card.device, "failed to write phy reg\n");
|
||||
|
||||
return -EBUSY;
|
||||
}
|
||||
|
@ -680,11 +684,14 @@ static void ar_context_release(struct ar_context *ctx)
|
|||
|
||||
static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
|
||||
{
|
||||
if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
|
||||
reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
|
||||
flush_writes(ctx->ohci);
|
||||
struct fw_ohci *ohci = ctx->ohci;
|
||||
|
||||
fw_error("AR error: %s; DMA stopped\n", error_msg);
|
||||
if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
|
||||
reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
|
||||
flush_writes(ohci);
|
||||
|
||||
dev_err(ohci->card.device, "AR error: %s; DMA stopped\n",
|
||||
error_msg);
|
||||
}
|
||||
/* FIXME: restart? */
|
||||
}
|
||||
|
@ -854,7 +861,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
|
|||
p.timestamp = status & 0xffff;
|
||||
p.generation = ohci->request_generation;
|
||||
|
||||
log_ar_at_event('R', p.speed, p.header, evt);
|
||||
log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
|
||||
|
||||
/*
|
||||
* Several controllers, notably from NEC and VIA, forget to
|
||||
|
@ -1226,21 +1233,22 @@ static void context_append(struct context *ctx,
|
|||
|
||||
static void context_stop(struct context *ctx)
|
||||
{
|
||||
struct fw_ohci *ohci = ctx->ohci;
|
||||
u32 reg;
|
||||
int i;
|
||||
|
||||
reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
|
||||
reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
|
||||
ctx->running = false;
|
||||
|
||||
for (i = 0; i < 1000; i++) {
|
||||
reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
|
||||
reg = reg_read(ohci, CONTROL_SET(ctx->regs));
|
||||
if ((reg & CONTEXT_ACTIVE) == 0)
|
||||
return;
|
||||
|
||||
if (i)
|
||||
udelay(10);
|
||||
}
|
||||
fw_error("Error: DMA context still active (0x%08x)\n", reg);
|
||||
dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg);
|
||||
}
|
||||
|
||||
struct driver_data {
|
||||
|
@ -1420,7 +1428,7 @@ static int handle_at_packet(struct context *context,
|
|||
evt = le16_to_cpu(last->transfer_status) & 0x1f;
|
||||
packet->timestamp = le16_to_cpu(last->res_count);
|
||||
|
||||
log_ar_at_event('T', packet->speed, packet->header, evt);
|
||||
log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
|
||||
|
||||
switch (evt) {
|
||||
case OHCI1394_evt_timeout:
|
||||
|
@ -1549,7 +1557,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
|
|||
goto out;
|
||||
}
|
||||
|
||||
fw_error("swap not done (CSR lock timeout)\n");
|
||||
dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n");
|
||||
fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
|
||||
|
||||
out:
|
||||
|
@ -1623,15 +1631,10 @@ static void detect_dead_context(struct fw_ohci *ohci,
|
|||
u32 ctl;
|
||||
|
||||
ctl = reg_read(ohci, CONTROL_SET(regs));
|
||||
if (ctl & CONTEXT_DEAD) {
|
||||
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
|
||||
fw_error("DMA context %s has stopped, error code: %s\n",
|
||||
name, evts[ctl & 0x1f]);
|
||||
#else
|
||||
fw_error("DMA context %s has stopped, error code: %#x\n",
|
||||
name, ctl & 0x1f);
|
||||
#endif
|
||||
}
|
||||
if (ctl & CONTEXT_DEAD)
|
||||
dev_err(ohci->card.device,
|
||||
"DMA context %s has stopped, error code: %s\n",
|
||||
name, evts[ctl & 0x1f]);
|
||||
}
|
||||
|
||||
static void handle_dead_contexts(struct fw_ohci *ohci)
|
||||
|
@ -1781,7 +1784,8 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
|
|||
|
||||
reg = reg_read(ohci, OHCI1394_NodeID);
|
||||
if (!(reg & OHCI1394_NodeID_idValid)) {
|
||||
fw_notify("node ID not valid, new bus reset in progress\n");
|
||||
dev_notice(ohci->card.device,
|
||||
"node ID not valid, new bus reset in progress\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
self_id |= ((reg & 0x3f) << 24); /* phy ID */
|
||||
|
@ -1827,11 +1831,12 @@ static void bus_reset_work(struct work_struct *work)
|
|||
|
||||
reg = reg_read(ohci, OHCI1394_NodeID);
|
||||
if (!(reg & OHCI1394_NodeID_idValid)) {
|
||||
fw_notify("node ID not valid, new bus reset in progress\n");
|
||||
dev_notice(ohci->card.device,
|
||||
"node ID not valid, new bus reset in progress\n");
|
||||
return;
|
||||
}
|
||||
if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
|
||||
fw_notify("malconfigured bus\n");
|
||||
dev_notice(ohci->card.device, "malconfigured bus\n");
|
||||
return;
|
||||
}
|
||||
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
|
||||
|
@ -1845,7 +1850,7 @@ static void bus_reset_work(struct work_struct *work)
|
|||
|
||||
reg = reg_read(ohci, OHCI1394_SelfIDCount);
|
||||
if (reg & OHCI1394_SelfIDCount_selfIDError) {
|
||||
fw_notify("inconsistent self IDs\n");
|
||||
dev_notice(ohci->card.device, "inconsistent self IDs\n");
|
||||
return;
|
||||
}
|
||||
/*
|
||||
|
@ -1857,7 +1862,7 @@ static void bus_reset_work(struct work_struct *work)
|
|||
self_id_count = (reg >> 3) & 0xff;
|
||||
|
||||
if (self_id_count > 252) {
|
||||
fw_notify("inconsistent self IDs\n");
|
||||
dev_notice(ohci->card.device, "inconsistent self IDs\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1875,11 +1880,13 @@ static void bus_reset_work(struct work_struct *work)
|
|||
*/
|
||||
if (cond_le32_to_cpu(ohci->self_id_cpu[i])
|
||||
== 0xffff008f) {
|
||||
fw_notify("ignoring spurious self IDs\n");
|
||||
dev_notice(ohci->card.device,
|
||||
"ignoring spurious self IDs\n");
|
||||
self_id_count = j;
|
||||
break;
|
||||
} else {
|
||||
fw_notify("inconsistent self IDs\n");
|
||||
dev_notice(ohci->card.device,
|
||||
"inconsistent self IDs\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -1890,13 +1897,14 @@ static void bus_reset_work(struct work_struct *work)
|
|||
if (ohci->quirks & QUIRK_TI_SLLZ059) {
|
||||
self_id_count = find_and_insert_self_id(ohci, self_id_count);
|
||||
if (self_id_count < 0) {
|
||||
fw_notify("could not construct local self ID\n");
|
||||
dev_notice(ohci->card.device,
|
||||
"could not construct local self ID\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (self_id_count == 0) {
|
||||
fw_notify("inconsistent self IDs\n");
|
||||
dev_notice(ohci->card.device, "inconsistent self IDs\n");
|
||||
return;
|
||||
}
|
||||
rmb();
|
||||
|
@ -1917,8 +1925,8 @@ static void bus_reset_work(struct work_struct *work)
|
|||
|
||||
new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
|
||||
if (new_generation != generation) {
|
||||
fw_notify("recursive bus reset detected, "
|
||||
"discarding self ids\n");
|
||||
dev_notice(ohci->card.device,
|
||||
"new bus reset, discarding self ids\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1989,8 +1997,7 @@ static void bus_reset_work(struct work_struct *work)
|
|||
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
|
||||
free_rom, free_rom_bus);
|
||||
|
||||
log_selfids(ohci->node_id, generation,
|
||||
self_id_count, ohci->self_id_buffer);
|
||||
log_selfids(ohci, generation, self_id_count);
|
||||
|
||||
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
|
||||
self_id_count, ohci->self_id_buffer,
|
||||
|
@ -2015,7 +2022,7 @@ static irqreturn_t irq_handler(int irq, void *data)
|
|||
*/
|
||||
reg_write(ohci, OHCI1394_IntEventClear,
|
||||
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
|
||||
log_irqs(event);
|
||||
log_irqs(ohci, event);
|
||||
|
||||
if (event & OHCI1394_selfIDComplete)
|
||||
queue_work(fw_workqueue, &ohci->bus_reset_work);
|
||||
|
@ -2057,8 +2064,7 @@ static irqreturn_t irq_handler(int irq, void *data)
|
|||
}
|
||||
|
||||
if (unlikely(event & OHCI1394_regAccessFail))
|
||||
fw_error("Register access failure - "
|
||||
"please notify linux1394-devel@lists.sf.net\n");
|
||||
dev_err(ohci->card.device, "register access failure\n");
|
||||
|
||||
if (unlikely(event & OHCI1394_postedWriteErr)) {
|
||||
reg_read(ohci, OHCI1394_PostedWriteAddressHi);
|
||||
|
@ -2066,12 +2072,13 @@ static irqreturn_t irq_handler(int irq, void *data)
|
|||
reg_write(ohci, OHCI1394_IntEventClear,
|
||||
OHCI1394_postedWriteErr);
|
||||
if (printk_ratelimit())
|
||||
fw_error("PCI posted write error\n");
|
||||
dev_err(ohci->card.device, "PCI posted write error\n");
|
||||
}
|
||||
|
||||
if (unlikely(event & OHCI1394_cycleTooLong)) {
|
||||
if (printk_ratelimit())
|
||||
fw_notify("isochronous cycle too long\n");
|
||||
dev_notice(ohci->card.device,
|
||||
"isochronous cycle too long\n");
|
||||
reg_write(ohci, OHCI1394_LinkControlSet,
|
||||
OHCI1394_LinkControl_cycleMaster);
|
||||
}
|
||||
|
@ -2084,7 +2091,8 @@ static irqreturn_t irq_handler(int irq, void *data)
|
|||
* them at least two cycles later. (FIXME?)
|
||||
*/
|
||||
if (printk_ratelimit())
|
||||
fw_notify("isochronous cycle inconsistent\n");
|
||||
dev_notice(ohci->card.device,
|
||||
"isochronous cycle inconsistent\n");
|
||||
}
|
||||
|
||||
if (unlikely(event & OHCI1394_unrecoverableError))
|
||||
|
@ -2211,7 +2219,7 @@ static int ohci_enable(struct fw_card *card,
|
|||
int i, ret;
|
||||
|
||||
if (software_reset(ohci)) {
|
||||
fw_error("Failed to reset ohci card.\n");
|
||||
dev_err(card->device, "failed to reset ohci card\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
@ -2235,7 +2243,7 @@ static int ohci_enable(struct fw_card *card,
|
|||
}
|
||||
|
||||
if (!lps) {
|
||||
fw_error("Failed to set Link Power Status\n");
|
||||
dev_err(card->device, "failed to set Link Power Status\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
@ -2244,7 +2252,7 @@ static int ohci_enable(struct fw_card *card,
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
if (ret)
|
||||
fw_notify("local TSB41BA3D phy\n");
|
||||
dev_notice(card->device, "local TSB41BA3D phy\n");
|
||||
else
|
||||
ohci->quirks &= ~QUIRK_TI_SLLZ059;
|
||||
}
|
||||
|
@ -2344,7 +2352,8 @@ static int ohci_enable(struct fw_card *card,
|
|||
if (request_irq(dev->irq, irq_handler,
|
||||
pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
|
||||
ohci_driver_name, ohci)) {
|
||||
fw_error("Failed to allocate interrupt %d.\n", dev->irq);
|
||||
dev_err(card->device, "failed to allocate interrupt %d\n",
|
||||
dev->irq);
|
||||
pci_disable_msi(dev);
|
||||
|
||||
if (config_rom) {
|
||||
|
@ -2509,7 +2518,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
|
|||
dma_unmap_single(ohci->card.device, packet->payload_bus,
|
||||
packet->payload_length, DMA_TO_DEVICE);
|
||||
|
||||
log_ar_at_event('T', packet->speed, packet->header, 0x20);
|
||||
log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
|
||||
driver_data->packet = NULL;
|
||||
packet->ack = RCODE_CANCELLED;
|
||||
packet->callback(packet, &ohci->card, packet->ack);
|
||||
|
@ -2674,25 +2683,35 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
|
|||
}
|
||||
}
|
||||
|
||||
static void copy_iso_headers(struct iso_context *ctx, void *p)
|
||||
static void flush_iso_completions(struct iso_context *ctx)
|
||||
{
|
||||
int i = ctx->header_length;
|
||||
ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
|
||||
ctx->header_length, ctx->header,
|
||||
ctx->base.callback_data);
|
||||
ctx->header_length = 0;
|
||||
}
|
||||
|
||||
if (i + ctx->base.header_size > PAGE_SIZE)
|
||||
return;
|
||||
static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
|
||||
{
|
||||
u32 *ctx_hdr;
|
||||
|
||||
if (ctx->header_length + ctx->base.header_size > PAGE_SIZE)
|
||||
flush_iso_completions(ctx);
|
||||
|
||||
ctx_hdr = ctx->header + ctx->header_length;
|
||||
ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
|
||||
|
||||
/*
|
||||
* The iso header is byteswapped to little endian by
|
||||
* the controller, but the remaining header quadlets
|
||||
* are big endian. We want to present all the headers
|
||||
* as big endian, so we have to swap the first quadlet.
|
||||
* The two iso header quadlets are byteswapped to little
|
||||
* endian by the controller, but we want to present them
|
||||
* as big endian for consistency with the bus endianness.
|
||||
*/
|
||||
if (ctx->base.header_size > 0)
|
||||
*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
|
||||
ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
|
||||
if (ctx->base.header_size > 4)
|
||||
*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
|
||||
ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
|
||||
if (ctx->base.header_size > 8)
|
||||
memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
|
||||
memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
|
||||
ctx->header_length += ctx->base.header_size;
|
||||
}
|
||||
|
||||
|
@ -2704,8 +2723,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
|
|||
container_of(context, struct iso_context, context);
|
||||
struct descriptor *pd;
|
||||
u32 buffer_dma;
|
||||
__le32 *ir_header;
|
||||
void *p;
|
||||
|
||||
for (pd = d; pd <= last; pd++)
|
||||
if (pd->transfer_status)
|
||||
|
@ -2724,17 +2741,10 @@ static int handle_ir_packet_per_buffer(struct context *context,
|
|||
DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
p = last + 1;
|
||||
copy_iso_headers(ctx, p);
|
||||
copy_iso_headers(ctx, (u32 *) (last + 1));
|
||||
|
||||
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
|
||||
ir_header = (__le32 *) p;
|
||||
ctx->base.callback.sc(&ctx->base,
|
||||
le32_to_cpu(ir_header[0]) & 0xffff,
|
||||
ctx->header_length, ctx->header,
|
||||
ctx->base.callback_data);
|
||||
ctx->header_length = 0;
|
||||
}
|
||||
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
|
||||
flush_iso_completions(ctx);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@ -2746,29 +2756,51 @@ static int handle_ir_buffer_fill(struct context *context,
|
|||
{
|
||||
struct iso_context *ctx =
|
||||
container_of(context, struct iso_context, context);
|
||||
unsigned int req_count, res_count, completed;
u32 buffer_dma;

if (!last->transfer_status)
req_count = le16_to_cpu(last->req_count);
res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
completed = req_count - res_count;
buffer_dma = le32_to_cpu(last->data_address);

if (completed > 0) {
ctx->mc_buffer_bus = buffer_dma;
ctx->mc_completed = completed;
}

if (res_count != 0)
/* Descriptor(s) not done yet, stop iteration */
return 0;

buffer_dma = le32_to_cpu(last->data_address);
dma_sync_single_range_for_cpu(context->ohci->card.device,
buffer_dma & PAGE_MASK,
buffer_dma & ~PAGE_MASK,
le16_to_cpu(last->req_count),
DMA_FROM_DEVICE);
completed, DMA_FROM_DEVICE);

if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
ctx->base.callback.mc(&ctx->base,
le32_to_cpu(last->data_address) +
le16_to_cpu(last->req_count) -
le16_to_cpu(last->res_count),
buffer_dma + completed,
ctx->base.callback_data);
ctx->mc_completed = 0;
}

return 1;
}

static void flush_ir_buffer_fill(struct iso_context *ctx)
{
dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
ctx->mc_buffer_bus & PAGE_MASK,
ctx->mc_buffer_bus & ~PAGE_MASK,
ctx->mc_completed, DMA_FROM_DEVICE);

ctx->base.callback.mc(&ctx->base,
ctx->mc_buffer_bus + ctx->mc_completed,
ctx->base.callback_data);
ctx->mc_completed = 0;
}

static inline void sync_it_packet_for_cpu(struct context *context,
struct descriptor *pd)
{
@@ -2812,8 +2844,8 @@ static int handle_it_packet(struct context *context,
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
int i;
struct descriptor *pd;
__be32 *ctx_hdr;

for (pd = d; pd <= last; pd++)
if (pd->transfer_status)
@@ -2824,20 +2856,19 @@ static int handle_it_packet(struct context *context,

sync_it_packet_for_cpu(context, d);

i = ctx->header_length;
if (i + 4 < PAGE_SIZE) {
/* Present this value as big-endian to match the receive code */
*(__be32 *)(ctx->header + i) = cpu_to_be32(
((u32)le16_to_cpu(pd->transfer_status) << 16) |
le16_to_cpu(pd->res_count));
ctx->header_length += 4;
}
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
ctx->header_length, ctx->header,
ctx->base.callback_data);
ctx->header_length = 0;
}
if (ctx->header_length + 4 > PAGE_SIZE)
flush_iso_completions(ctx);

ctx_hdr = ctx->header + ctx->header_length;
ctx->last_timestamp = le16_to_cpu(last->res_count);
/* Present this value as big-endian to match the receive code */
*ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
le16_to_cpu(pd->res_count));
ctx->header_length += 4;

if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
flush_iso_completions(ctx);

return 1;
}

@@ -2924,8 +2955,10 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
if (ret < 0)
goto out_with_header;

if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
set_multichannel_mask(ohci, 0);
ctx->mc_completed = 0;
}

return &ctx->base;

@@ -3387,6 +3420,39 @@ static void ohci_flush_queue_iso(struct fw_iso_context *base)
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static int ohci_flush_iso_completions(struct fw_iso_context *base)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
int ret = 0;

tasklet_disable(&ctx->context.tasklet);

if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
context_tasklet((unsigned long)&ctx->context);

switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
case FW_ISO_CONTEXT_RECEIVE:
if (ctx->header_length != 0)
flush_iso_completions(ctx);
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
if (ctx->mc_completed != 0)
flush_ir_buffer_fill(ctx);
break;
default:
ret = -ENOSYS;
}

clear_bit_unlock(0, &ctx->flushing_completions);
smp_mb__after_clear_bit();
}

tasklet_enable(&ctx->context.tasklet);

return ret;
}

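For orientation: the core reaches this new hook through fw_iso_context_flush_completions(), whose declaration appears in the <linux/firewire.h> hunk further below. A minimal sketch of such a core-side wrapper (assumed here, not the literal drivers/firewire/core-iso.c code) would be:

/*
 * Sketch only: forward a flush request to the card driver's
 * flush_iso_completions hook (ohci_flush_iso_completions above).
 */
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
        return ctx->card->driver->flush_iso_completions(ctx);
}

Note how ohci_flush_iso_completions() serializes against the interrupt tasklet with tasklet_disable() and against concurrent flushers with test_and_set_bit_lock() before running the context tasklet body synchronously.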
static const struct fw_card_driver ohci_driver = {
.enable = ohci_enable,
.read_phy_reg = ohci_read_phy_reg,
@@ -3404,6 +3470,7 @@ static const struct fw_card_driver ohci_driver = {
.set_iso_channels = ohci_set_iso_channels,
.queue_iso = ohci_queue_iso,
.flush_queue_iso = ohci_flush_queue_iso,
.flush_iso_completions = ohci_flush_iso_completions,
.start_iso = ohci_start_iso,
.stop_iso = ohci_stop_iso,
};
@@ -3463,7 +3530,7 @@ static int __devinit pci_probe(struct pci_dev *dev,

err = pci_enable_device(dev);
if (err) {
fw_error("Failed to enable OHCI hardware\n");
dev_err(&dev->dev, "failed to enable OHCI hardware\n");
goto fail_free;
}

@@ -3478,13 +3545,13 @@ static int __devinit pci_probe(struct pci_dev *dev,

err = pci_request_region(dev, 0, ohci_driver_name);
if (err) {
fw_error("MMIO resource unavailable\n");
dev_err(&dev->dev, "MMIO resource unavailable\n");
goto fail_disable;
}

ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
if (ohci->registers == NULL) {
fw_error("Failed to remap registers\n");
dev_err(&dev->dev, "failed to remap registers\n");
err = -ENXIO;
goto fail_iomem;
}
@@ -3573,9 +3640,10 @@ static int __devinit pci_probe(struct pci_dev *dev,
goto fail_contexts;

version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
dev_notice(&dev->dev,
"added OHCI v%x.%x device as card %d, "
"%d IR + %d IT contexts, quirks 0x%x\n",
dev_name(&dev->dev), version >> 16, version & 0xff,
version >> 16, version & 0xff, ohci->card.index,
ohci->n_ir, ohci->n_it, ohci->quirks);

return 0;
@@ -3604,7 +3672,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
pmac_ohci_off(dev);
fail:
if (err == -ENOMEM)
fw_error("Out of memory\n");
dev_err(&dev->dev, "out of memory\n");

return err;
}
@@ -3648,7 +3716,7 @@ static void pci_remove(struct pci_dev *dev)
kfree(ohci);
pmac_ohci_off(dev);

fw_notify("Removed fw-ohci device.\n");
dev_notice(&dev->dev, "removed fw-ohci device\n");
}

#ifdef CONFIG_PM
@@ -3662,12 +3730,12 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
pci_disable_msi(dev);
err = pci_save_state(dev);
if (err) {
fw_error("pci_save_state failed\n");
dev_err(&dev->dev, "pci_save_state failed\n");
return err;
}
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
fw_error("pci_set_power_state failed with %d\n", err);
dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err);
pmac_ohci_off(dev);

return 0;
@@ -3683,7 +3751,7 @@ static int pci_resume(struct pci_dev *dev)
pci_restore_state(dev);
err = pci_enable_device(dev);
if (err) {
fw_error("pci_enable_device failed\n");
dev_err(&dev->dev, "pci_enable_device failed\n");
return err;
}

@@ -125,8 +125,6 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
", or a combination)");

static const char sbp2_driver_name[] = "sbp2";

/*
* We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
* and one struct scsi_device per sbp2_logical_unit.
@@ -165,7 +163,6 @@ static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
*/
struct sbp2_target {
struct fw_unit *unit;
const char *bus_id;
struct list_head lu_list;

u64 management_agent_address;
@@ -181,11 +178,21 @@ struct sbp2_target {
int blocked; /* ditto */
};

static struct fw_device *target_device(struct sbp2_target *tgt)
static struct fw_device *target_parent_device(struct sbp2_target *tgt)
{
return fw_parent_device(tgt->unit);
}

static const struct device *tgt_dev(const struct sbp2_target *tgt)
{
return &tgt->unit->device;
}

static const struct device *lu_dev(const struct sbp2_logical_unit *lu)
{
return &lu->tgt->unit->device;
}

/* Impossible login_id, to detect logout attempt before successful login */
#define INVALID_LOGIN_ID 0x10000

@@ -211,6 +218,7 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a
#define SBP2_CSR_FIRMWARE_REVISION 0x3c
#define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14
#define SBP2_CSR_UNIT_UNIQUE_ID 0x8d
#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4

/* Management orb opcodes */
@@ -430,7 +438,8 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
memcpy(status.data, payload + 8, length - 8);

if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
fw_notify("non-orb related status write, not handled\n");
dev_notice(lu_dev(lu),
"non-ORB related status write, not handled\n");
fw_send_response(card, request, RCODE_COMPLETE);
return;
}
@@ -451,7 +460,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
orb->callback(orb, &status);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
fw_error("status write for unknown orb\n");
dev_err(lu_dev(lu), "status write for unknown ORB\n");
}

fw_send_response(card, request, RCODE_COMPLETE);
@@ -492,7 +501,7 @@ static void complete_transaction(struct fw_card *card, int rcode,
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
int node_id, int generation, u64 offset)
{
struct fw_device *device = target_device(lu->tgt);
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_pointer orb_pointer;
unsigned long flags;

@@ -513,7 +522,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,

static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
{
struct fw_device *device = target_device(lu->tgt);
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_orb *orb, *next;
struct list_head list;
unsigned long flags;
@@ -552,7 +561,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
int generation, int function,
int lun_or_login_id, void *response)
{
struct fw_device *device = target_device(lu->tgt);
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_management_orb *orb;
unsigned int timeout;
int retval = -ENOMEM;
@@ -560,7 +569,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
return 0;

orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
orb = kzalloc(sizeof(*orb), GFP_NOIO);
if (orb == NULL)
return -ENOMEM;

@@ -612,20 +621,20 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,

retval = -EIO;
if (sbp2_cancel_orbs(lu) == 0) {
fw_error("%s: orb reply timed out, rcode=0x%02x\n",
lu->tgt->bus_id, orb->base.rcode);
dev_err(lu_dev(lu), "ORB reply timed out, rcode 0x%02x\n",
orb->base.rcode);
goto out;
}

if (orb->base.rcode != RCODE_COMPLETE) {
fw_error("%s: management write failed, rcode 0x%02x\n",
lu->tgt->bus_id, orb->base.rcode);
dev_err(lu_dev(lu), "management write failed, rcode 0x%02x\n",
orb->base.rcode);
goto out;
}

if (STATUS_GET_RESPONSE(orb->status) != 0 ||
STATUS_GET_SBP_STATUS(orb->status) != 0) {
fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
dev_err(lu_dev(lu), "error status: %d:%d\n",
STATUS_GET_RESPONSE(orb->status),
STATUS_GET_SBP_STATUS(orb->status));
goto out;
@@ -648,7 +657,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,

static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
{
struct fw_device *device = target_device(lu->tgt);
struct fw_device *device = target_parent_device(lu->tgt);
__be32 d = 0;

fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -665,7 +674,7 @@ static void complete_agent_reset_write_no_wait(struct fw_card *card,

static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
{
struct fw_device *device = target_device(lu->tgt);
struct fw_device *device = target_parent_device(lu->tgt);
struct fw_transaction *t;
static __be32 d;

@@ -704,7 +713,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
{
struct sbp2_target *tgt = lu->tgt;
struct fw_card *card = target_device(tgt)->card;
struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
@@ -728,7 +737,7 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
{
struct sbp2_target *tgt = lu->tgt;
struct fw_card *card = target_device(tgt)->card;
struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
@@ -753,7 +762,7 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
*/
static void sbp2_unblock(struct sbp2_target *tgt)
{
struct fw_card *card = target_device(tgt)->card;
struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
@@ -794,7 +803,7 @@ static int sbp2_lun2int(u16 lun)
*/
static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
{
struct fw_device *device = target_device(lu->tgt);
struct fw_device *device = target_parent_device(lu->tgt);
__be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);

fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -809,7 +818,7 @@ static void sbp2_login(struct work_struct *work)
struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_logical_unit, work.work);
struct sbp2_target *tgt = lu->tgt;
struct fw_device *device = target_device(tgt);
struct fw_device *device = target_parent_device(tgt);
struct Scsi_Host *shost;
struct scsi_device *sdev;
struct sbp2_login_response response;
@@ -833,8 +842,8 @@ static void sbp2_login(struct work_struct *work)
if (lu->retries++ < 5) {
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
} else {
fw_error("%s: failed to login to LUN %04x\n",
tgt->bus_id, lu->lun);
dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n",
lu->lun);
/* Let any waiting I/O fail from now on. */
sbp2_unblock(lu->tgt);
}
@@ -851,8 +860,8 @@ static void sbp2_login(struct work_struct *work)
<< 32) | be32_to_cpu(response.command_block_agent.low);
lu->login_id = be32_to_cpu(response.misc) & 0xffff;

fw_notify("%s: logged in to LUN %04x (%d retries)\n",
tgt->bus_id, lu->lun, lu->retries);
dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n",
lu->lun, lu->retries);

/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
sbp2_set_busy_timeout(lu);
@@ -919,7 +928,7 @@ static void sbp2_reconnect(struct work_struct *work)
struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_logical_unit, work.work);
struct sbp2_target *tgt = lu->tgt;
struct fw_device *device = target_device(tgt);
struct fw_device *device = target_parent_device(tgt);
int generation, node_id, local_node_id;

if (fw_device_is_shutdown(device))
@@ -943,7 +952,7 @@ static void sbp2_reconnect(struct work_struct *work)
smp_rmb(); /* get current card generation */
if (generation == device->card->generation ||
lu->retries++ >= 5) {
fw_error("%s: failed to reconnect\n", tgt->bus_id);
dev_err(tgt_dev(tgt), "failed to reconnect\n");
lu->retries = 0;
PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
}
@@ -957,8 +966,8 @@ static void sbp2_reconnect(struct work_struct *work)
smp_wmb(); /* node IDs must not be older than generation */
lu->generation = generation;

fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
tgt->bus_id, lu->lun, lu->retries);
dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n",
lu->lun, lu->retries);

sbp2_agent_reset(lu);
sbp2_cancel_orbs(lu);
@@ -997,6 +1006,13 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
return 0;
}

static void sbp2_get_unit_unique_id(struct sbp2_target *tgt,
const u32 *leaf)
{
if ((leaf[0] & 0xffff0000) == 0x00020000)
tgt->guid = (u64)leaf[1] << 32 | leaf[2];
}

static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
const u32 *directory)
{
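As an aside, the Unit_Unique_ID check above relies on the IEEE 1212 leaf layout: the first quadlet carries the leaf length in its upper 16 bits (0x0002 for an EUI-64 leaf, CRC-16 in the lower bits), followed by the EUI-64 split over the next two quadlets. A standalone illustration with invented CRC and EUI-64 values (not taken from any real device):

/* Illustration only; the leaf contents below are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t leaf[] = {
                0x0002abcd,     /* length = 2 quadlets, CRC = 0xabcd */
                0x0001d202,     /* EUI-64, high quadlet */
                0x01234567,     /* EUI-64, low quadlet */
        };
        uint64_t guid = 0;

        /* same test and shift as sbp2_get_unit_unique_id() above */
        if ((leaf[0] & 0xffff0000) == 0x00020000)
                guid = (uint64_t)leaf[1] << 32 | leaf[2];

        printf("GUID 0x%016llx\n", (unsigned long long)guid);
        return 0;
}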
@@ -1048,6 +1064,10 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
return -ENOMEM;
break;

case SBP2_CSR_UNIT_UNIQUE_ID:
sbp2_get_unit_unique_id(tgt, ci.p - 1 + value);
break;

case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
/* Adjust for the increment in the iterator */
if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
@@ -1068,8 +1088,8 @@ static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
unsigned int timeout = tgt->mgt_orb_timeout;

if (timeout > 40000)
fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n",
tgt->bus_id, timeout / 1000);
dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n",
timeout / 1000);

tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
}
@@ -1081,9 +1101,9 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
unsigned int w = sbp2_param_workarounds;

if (w)
fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
"if you need the workarounds parameter for %s\n",
tgt->bus_id);
dev_notice(tgt_dev(tgt),
"Please notify linux1394-devel@lists.sf.net "
"if you need the workarounds parameter\n");

if (w & SBP2_WORKAROUND_OVERRIDE)
goto out;
@@ -1103,9 +1123,9 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
}
out:
if (w)
fw_notify("Workarounds for %s: 0x%x "
"(firmware_revision 0x%06x, model_id 0x%06x)\n",
tgt->bus_id, w, firmware_revision, model);
dev_notice(tgt_dev(tgt), "workarounds 0x%x "
"(firmware_revision 0x%06x, model_id 0x%06x)\n",
w, firmware_revision, model);
tgt->workarounds = w;
}

@@ -1121,6 +1141,10 @@ static int sbp2_probe(struct device *dev)
struct Scsi_Host *shost;
u32 model, firmware_revision;

/* cannot (or should not) handle targets on the local node */
if (device->is_local)
return -ENODEV;

if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
BUG_ON(dma_set_max_seg_size(device->card->device,
SBP2_MAX_SEG_SIZE));
@@ -1133,7 +1157,6 @@ static int sbp2_probe(struct device *dev)
dev_set_drvdata(&unit->device, tgt);
tgt->unit = unit;
INIT_LIST_HEAD(&tgt->lu_list);
tgt->bus_id = dev_name(&unit->device);
tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];

if (fw_device_enable_phys_dma(device) < 0)
@@ -1239,7 +1262,7 @@ static int sbp2_remove(struct device *dev)
kfree(lu);
}
scsi_remove_host(shost);
fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
dev_notice(dev, "released target %d:0:0\n", shost->host_no);

scsi_host_put(shost);
return 0;
@@ -1261,7 +1284,7 @@ static const struct ieee1394_device_id sbp2_id_table[] = {
static struct fw_driver sbp2_driver = {
.driver = {
.owner = THIS_MODULE,
.name = sbp2_driver_name,
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
.probe = sbp2_probe,
.remove = sbp2_remove,
@@ -1286,10 +1309,19 @@ static void sbp2_unmap_scatterlist(struct device *card_device,
static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{
int sam_status;
int sfmt = (sbp2_status[0] >> 6) & 0x03;

sense_data[0] = 0x70;
if (sfmt == 2 || sfmt == 3) {
/*
* Reserved for future standardization (2) or
* Status block format vendor-dependent (3)
*/
return DID_ERROR << 16;
}

sense_data[0] = 0x70 | sfmt | (sbp2_status[1] & 0x80);
sense_data[1] = 0x0;
sense_data[2] = sbp2_status[1];
sense_data[2] = ((sbp2_status[1] << 1) & 0xe0) | (sbp2_status[1] & 0x0f);
sense_data[3] = sbp2_status[4];
sense_data[4] = sbp2_status[5];
sense_data[5] = sbp2_status[6];
@@ -1325,7 +1357,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
{
struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base);
struct fw_device *device = target_device(orb->lu->tgt);
struct fw_device *device = target_parent_device(orb->lu->tgt);
int result;

if (status != NULL) {
@@ -1433,7 +1465,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *cmd)
{
struct sbp2_logical_unit *lu = cmd->device->hostdata;
struct fw_device *device = target_device(lu->tgt);
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_command_orb *orb;
int generation, retval = SCSI_MLQUEUE_HOST_BUSY;

@@ -1442,7 +1474,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
* transfer direction not handled.
*/
if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
dev_err(lu_dev(lu), "cannot handle bidirectional command\n");
cmd->result = DID_ERROR << 16;
cmd->scsi_done(cmd);
return 0;
@@ -1450,7 +1482,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,

orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
if (orb == NULL) {
fw_notify("failed to alloc orb\n");
dev_notice(lu_dev(lu), "failed to alloc ORB\n");
return SCSI_MLQUEUE_HOST_BUSY;
}

@@ -1550,7 +1582,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
{
struct sbp2_logical_unit *lu = cmd->device->hostdata;

fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
dev_notice(lu_dev(lu), "sbp2_scsi_abort\n");
sbp2_agent_reset(lu);
sbp2_cancel_orbs(lu);

@@ -1590,7 +1622,7 @@ static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
static struct scsi_host_template scsi_driver_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
.proc_name = sbp2_driver_name,
.proc_name = "sbp2",
.queuecommand = sbp2_scsi_queuecommand,
.slave_alloc = sbp2_scsi_slave_alloc,
.slave_configure = sbp2_scsi_slave_configure,

@@ -207,12 +207,16 @@ struct fw_cdev_event_request2 {
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_ISO_INTERRUPT
* @cycle: Cycle counter of the interrupt packet
* @cycle: Cycle counter of the last completed packet
* @header_length: Total length of following headers, in bytes
* @header: Stripped headers, if any
*
* This event is sent when the controller has completed an &fw_cdev_iso_packet
* with the %FW_CDEV_ISO_INTERRUPT bit set.
* with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
* %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
* without the interrupt bit set that the kernel's internal buffer for @header
* is about to overflow. (In the last case, kernels with ABI version < 5 drop
* header data up to the next interrupt packet.)
*
* Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
*
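The userspace counterpart of the completion behaviour described above is the FW_CDEV_ISO_INTERRUPT flag in fw_cdev_iso_packet.control. A minimal sketch of how a client might mark every Nth queued packet for an interrupt, using the control macros from this header (the interval of 16 and the header length of 4 bytes are arbitrary choices here, not anything mandated by the ABI):

/* Sketch: build the control word for one queued iso packet. */
#include <linux/firewire-cdev.h>

static __u32 iso_packet_control(unsigned int index, unsigned int payload_len)
{
        __u32 control = FW_CDEV_ISO_PAYLOAD_LENGTH(payload_len) |
                        FW_CDEV_ISO_HEADER_LENGTH(4);   /* per-packet header bytes */

        if (index % 16 == 0)    /* arbitrary interval */
                control |= FW_CDEV_ISO_INTERRUPT;

        return control;
}

Packets without the interrupt bit are now never silently dropped; their headers are delivered either at the next interrupt packet, at an explicit FW_CDEV_IOC_FLUSH_ISO, or when the kernel's header buffer is about to fill up.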
@@ -267,9 +271,9 @@ struct fw_cdev_event_iso_interrupt {
*
* This event is sent in multichannel contexts (context type
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
* chunks that have the %FW_CDEV_ISO_INTERRUPT bit set. Whether this happens
* when a packet is completed and/or when a buffer chunk is completed depends
* on the hardware implementation.
* chunks that have been completely filled and that have the
* %FW_CDEV_ISO_INTERRUPT bit set, or when explicitly requested with
* %FW_CDEV_IOC_FLUSH_ISO.
*
* The buffer is continuously filled with the following data, per packet:
* - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt,
@@ -419,6 +423,9 @@ union fw_cdev_event {
#define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets)
#define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels)

/* available since kernel version 3.4 */
#define FW_CDEV_IOC_FLUSH_ISO _IOW('#', 0x18, struct fw_cdev_flush_iso)

/*
* ABI version history
* 1 (2.6.22) - initial version
@@ -441,6 +448,9 @@ union fw_cdev_event {
* - added %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL,
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and
* %FW_CDEV_IOC_SET_ISO_CHANNELS
* 5 (3.4) - send %FW_CDEV_EVENT_ISO_INTERRUPT events when needed to
* avoid dropping data
* - added %FW_CDEV_IOC_FLUSH_ISO
*/

/**
@@ -850,6 +860,25 @@ struct fw_cdev_stop_iso {
__u32 handle;
};

/**
* struct fw_cdev_flush_iso - flush completed iso packets
* @handle: handle of isochronous context to flush
*
* For %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE contexts,
* report any completed packets.
*
* For %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL contexts, report the current
* offset in the receive buffer, if it has changed; this is typically in the
* middle of some buffer chunk.
*
* Any %FW_CDEV_EVENT_ISO_INTERRUPT or %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
* events generated by this ioctl are sent synchronously, i.e., are available
* for reading from the file descriptor when this ioctl returns.
*/
struct fw_cdev_flush_iso {
__u32 handle;
};

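To illustrate the new ioctl from the client side: given a file descriptor on a /dev/fw* character device and the handle of an already-created isochronous context (both assumed to exist here), a flush is a single ioctl, and any events it generates can be read() immediately afterwards, as documented above:

/* Sketch of a userspace caller; error handling kept minimal. */
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int flush_iso_completions(int fd, __u32 context_handle)
{
        struct fw_cdev_flush_iso flush = { .handle = context_handle };

        return ioctl(fd, FW_CDEV_IOC_FLUSH_ISO, &flush);
}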
/**
* struct fw_cdev_get_cycle_timer - read cycle timer register
* @local_time: system time, in microseconds since the Epoch

@@ -17,9 +17,6 @@
#include <linux/atomic.h>
#include <asm/byteorder.h>

#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)

#define CSR_REGISTER_BASE 0xfffff0000000ULL

/* register offsets are relative to CSR_REGISTER_BASE */
@@ -203,18 +200,6 @@ static inline int fw_device_is_shutdown(struct fw_device *device)
return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}

static inline struct fw_device *fw_device_get(struct fw_device *device)
{
get_device(&device->device);

return device;
}

static inline void fw_device_put(struct fw_device *device)
{
put_device(&device->device);
}

int fw_device_enable_phys_dma(struct fw_device *device);

/*
@@ -441,6 +426,7 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_buffer *buffer,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);

@@ -611,7 +611,6 @@ static void isight_card_free(struct snd_card *card)

fw_iso_resources_destroy(&isight->resources);
fw_unit_put(isight->unit);
fw_device_put(isight->device);
mutex_destroy(&isight->mutex);
}

@@ -644,7 +643,7 @@ static int isight_probe(struct device *unit_dev)
isight->card = card;
mutex_init(&isight->mutex);
isight->unit = fw_unit_get(unit);
isight->device = fw_device_get(fw_dev);
isight->device = fw_dev;
isight->audio_base = get_unit_base(unit);
if (!isight->audio_base) {
dev_err(&unit->device, "audio unit base not found\n");
@@ -681,7 +680,6 @@ static int isight_probe(struct device *unit_dev)

err_unit:
fw_unit_put(isight->unit);
fw_device_put(isight->device);
mutex_destroy(&isight->mutex);
error:
snd_card_free(card);

@@ -656,12 +656,10 @@ static u32 fwspk_read_firmware_version(struct fw_unit *unit)
static void fwspk_card_free(struct snd_card *card)
{
struct fwspk *fwspk = card->private_data;
struct fw_device *dev = fw_parent_device(fwspk->unit);

amdtp_out_stream_destroy(&fwspk->stream);
cmp_connection_destroy(&fwspk->connection);
fw_unit_put(fwspk->unit);
fw_device_put(dev);
mutex_destroy(&fwspk->mutex);
}

@@ -718,7 +716,6 @@ static int __devinit fwspk_probe(struct device *unit_dev)
fwspk = card->private_data;
fwspk->card = card;
mutex_init(&fwspk->mutex);
fw_device_get(fw_dev);
fwspk->unit = fw_unit_get(unit);
fwspk->device_info = fwspk_detect(fw_dev);
if (!fwspk->device_info) {
@@ -767,7 +764,6 @@ err_connection:
cmp_connection_destroy(&fwspk->connection);
err_unit:
fw_unit_put(fwspk->unit);
fw_device_put(fw_dev);
mutex_destroy(&fwspk->mutex);
error:
snd_card_free(card);