*/
int generation;
int retries;
+ work_func_t workfn;
struct delayed_work work;
bool has_sdev;
bool blocked;
unsigned int mgt_orb_timeout;
unsigned int max_payload;
+ spinlock_t lock;
int dont_block; /* counter for each logical unit */
int blocked; /* ditto */
};
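Taken together, the hunks above give each logical unit a switchable work handler and each target its own lock. A condensed sketch of the affected fields (abridged; both structures have more members than shown here):

struct sbp2_logical_unit {
	/* ... */
	work_func_t workfn;		/* current handler: sbp2_login or sbp2_reconnect */
	struct delayed_work work;	/* always dispatched via sbp2_lu_workfn() */
	bool has_sdev;
	bool blocked;			/* this unit is blocked */
	/* ... */
};

struct sbp2_target {
	/* ... */
	spinlock_t lock;		/* replaces card->lock for per-target state */
	int dont_block;			/* counter for each logical unit */
	int blocked;			/* number of blocked logical units */
	/* ... */
};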
dma_addr_t request_bus;
int rcode;
void (*callback)(struct sbp2_orb *orb, struct sbp2_status *status);
+ struct sbp2_logical_unit *lu;
struct list_head link;
};
u8 command_block[SBP2_MAX_CDB_SIZE];
} request;
struct scsi_cmnd *cmd;
- struct sbp2_logical_unit *lu;
struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
dma_addr_t page_table_bus;
};
/* Look up the orb corresponding to this status write. */
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irqsave(&lu->tgt->lock, flags);
list_for_each_entry(orb, &lu->orb_list, link) {
if (STATUS_GET_ORB_HIGH(status) == 0 &&
STATUS_GET_ORB_LOW(status) == orb->request_bus) {
break;
}
}
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&lu->tgt->lock, flags);
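/* If the loop found no match, "orb" points at the list head itself,
 * not at a real orb; the address check below detects this. */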
if (&orb->link != &lu->orb_list) {
orb->callback(orb, &status);
* been set and only does the cleanup if the transaction
* failed and we didn't already get a status write.
*/
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irqsave(&orb->lu->tgt->lock, flags);
if (orb->rcode == -1)
orb->rcode = rcode;
if (orb->rcode != RCODE_COMPLETE) {
list_del(&orb->link);
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
orb->callback(orb, NULL);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
}
kref_put(&orb->kref, free_orb); /* transaction callback reference */
orb_pointer.high = 0;
orb_pointer.low = cpu_to_be32(orb->request_bus);
- spin_lock_irqsave(&device->card->lock, flags);
+ orb->lu = lu;
+ spin_lock_irqsave(&lu->tgt->lock, flags);
list_add_tail(&orb->link, &lu->orb_list);
- spin_unlock_irqrestore(&device->card->lock, flags);
+ spin_unlock_irqrestore(&lu->tgt->lock, flags);
kref_get(&orb->kref); /* transaction callback reference */
kref_get(&orb->kref); /* orb callback reference */
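The two kref_get() calls here pair with the kref_put() calls elsewhere: complete_transaction() above always drops the transaction callback reference and additionally drops the orb callback reference when the transaction failed, while on success the status-write handler drops the orb callback reference after invoking orb->callback() (that put is outside this excerpt). A sketch of the submitter's side, assuming free_orb() is the kref release function and with illustrative argument names:

kref_init(&orb->base.kref);		/* submitter's initial reference */
sbp2_send_orb(&orb->base, lu, node_id, generation, offset);
					/* takes the two references above */
kref_put(&orb->base.kref, free_orb);	/* submitter drops its reference */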
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_orb *orb, *next;
struct list_head list;
- unsigned long flags;
int retval = -ENOENT;
INIT_LIST_HEAD(&list);
- spin_lock_irqsave(&device->card->lock, flags);
+ spin_lock_irq(&lu->tgt->lock);
list_splice_init(&lu->orb_list, &list);
- spin_unlock_irqrestore(&device->card->lock, flags);
+ spin_unlock_irq(&lu->tgt->lock);
list_for_each_entry_safe(orb, next, &list, link) {
retval = 0;
&d, 4, complete_agent_reset_write_no_wait, t);
}
-static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+static inline void sbp2_allow_block(struct sbp2_target *tgt)
{
- /*
- * We may access dont_block without taking card->lock here:
- * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
- * are currently serialized against each other.
- * And a wrong result in sbp2_conditionally_block()'s access of
- * dont_block is rather harmless, it simply misses its first chance.
- */
- --lu->tgt->dont_block;
+ spin_lock_irq(&tgt->lock);
+ --tgt->dont_block;
+ spin_unlock_irq(&tgt->lock);
}
/*
* logical units have been finished (indicated by dont_block == 0).
* - lu->generation is stale.
*
- * Note, scsi_block_requests() must be called while holding card->lock,
+ * Note, scsi_block_requests() must be called while holding tgt->lock,
* otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
* unblock the target.
*/
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irqsave(&tgt->lock, flags);
if (!tgt->dont_block && !lu->blocked &&
lu->generation != card->generation) {
lu->blocked = true;
if (++tgt->blocked == 1)
scsi_block_requests(shost);
}
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&tgt->lock, flags);
}
/*
* Unblocks lu->tgt as soon as all its logical units can be unblocked.
* Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section. On the other hand, running it inside
+ * tgt->lock protected section. On the other hand, running it inside
* the section might clash with shost->host_lock.
*/
static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
- unsigned long flags;
bool unblock = false;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&tgt->lock);
if (lu->blocked && lu->generation == card->generation) {
lu->blocked = false;
unblock = --tgt->blocked == 0;
}
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&tgt->lock);
if (unblock)
scsi_unblock_requests(shost);
/*
* Prevents future blocking of tgt and unblocks it.
* Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section. On the other hand, running it inside
+ * tgt->lock protected section. On the other hand, running it inside
* the section might clash with shost->host_lock.
*/
static void sbp2_unblock(struct sbp2_target *tgt)
{
- struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&tgt->lock);
++tgt->dont_block;
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&tgt->lock);
scsi_unblock_requests(shost);
}
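For orientation, the sites visible in this patch move the now lock-protected counter as follows (callers of sbp2_unblock() outside this excerpt are not listed):

/* Lifecycle sketch of tgt->dont_block, all updates under tgt->lock:
 *
 *   sbp2_add_logical_unit():  ++tgt->dont_block   unit exists, login pending
 *   sbp2_unblock():           ++tgt->dont_block   future blocking prevented
 *   sbp2_allow_block():       --tgt->dont_block   unit finished (re)login
 *
 * sbp2_conditionally_block() may block the Scsi_Host only while the
 * counter is zero, i.e. while no unit is mid-login. */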
/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
sbp2_set_busy_timeout(lu);
- PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+ lu->workfn = sbp2_reconnect;
sbp2_agent_reset(lu);
/* This was a re-login. */
/* No error during __scsi_add_device() */
lu->has_sdev = true;
scsi_device_put(sdev);
- sbp2_allow_block(lu);
+ sbp2_allow_block(tgt);
return;
* If a bus reset happened, sbp2_update will have requeued
* lu->work already. Reset the work from reconnect to login.
*/
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
static void sbp2_reconnect(struct work_struct *work)
lu->retries++ >= 5) {
dev_err(tgt_dev(tgt), "failed to reconnect\n");
lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
sbp2_conditionally_unblock(lu);
}
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+ struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+ struct sbp2_logical_unit, work);
+ lu->workfn(work);
+}
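This trampoline is what replaces the removed PREPARE_DELAYED_WORK(): the delayed_work is initialized exactly once, and changing behavior later is a plain pointer assignment. A minimal usage sketch with the names from this patch:

/* One-time setup, as in sbp2_add_logical_unit() below: */
lu->workfn = sbp2_login;
INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);

/* Switching handlers later needs no workqueue API; the next run of
 * lu->work reaches the new function through the trampoline: */
lu->workfn = sbp2_reconnect;
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));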
+
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
struct sbp2_logical_unit *lu;
lu->blocked = false;
++tgt->dont_block;
INIT_LIST_HEAD(&lu->orb_list);
- INIT_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
+ INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
list_add_tail(&lu->link, &tgt->lu_list);
return 0;
}
static struct scsi_host_template scsi_driver_template;
-static int sbp2_remove(struct device *dev);
+static void sbp2_remove(struct fw_unit *unit);
-static int sbp2_probe(struct device *dev)
+static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
{
- struct fw_unit *unit = fw_unit(dev);
struct fw_device *device = fw_parent_device(unit);
struct sbp2_target *tgt;
struct sbp2_logical_unit *lu;
dev_set_drvdata(&unit->device, tgt);
tgt->unit = unit;
INIT_LIST_HEAD(&tgt->lu_list);
+ spin_lock_init(&tgt->lock);
tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
if (fw_device_enable_phys_dma(device) < 0)
goto fail_shost_put;
return 0;
fail_remove:
- sbp2_remove(dev);
+ sbp2_remove(unit);
return -ENOMEM;
fail_shost_put:
}
}
-static int sbp2_remove(struct device *dev)
+static void sbp2_remove(struct fw_unit *unit)
{
- struct fw_unit *unit = fw_unit(dev);
struct fw_device *device = fw_parent_device(unit);
struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
struct sbp2_logical_unit *lu, *next;
kfree(lu);
}
scsi_remove_host(shost);
- dev_notice(dev, "released target %d:0:0\n", shost->host_no);
+ dev_notice(&unit->device, "released target %d:0:0\n", shost->host_no);
scsi_host_put(shost);
- return 0;
}
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
- .probe = sbp2_probe,
- .remove = sbp2_remove,
},
+ .probe = sbp2_probe,
.update = sbp2_update,
+ .remove = sbp2_remove,
.id_table = sbp2_id_table,
};
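With probe and remove moved out of the embedded device_driver, a firewire driver now receives the fw_unit directly and remove returns void. A hedged skeleton in the new shape (the example_* names are hypothetical):

static int example_probe(struct fw_unit *unit,
			 const struct ieee1394_device_id *id)
{
	/* bind driver state to the unit; negative errno on failure */
	return 0;
}

static void example_remove(struct fw_unit *unit)
{
	/* tear down; there is no status to return anymore */
}

static struct fw_driver example_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = KBUILD_MODNAME,
		.bus   = &fw_bus_type,
	},
	.probe    = example_probe,
	.update   = NULL,		/* optional bus-reset hook */
	.remove   = example_remove,
	.id_table = example_id_table,	/* hypothetical id table */
};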
{
struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base);
- struct fw_device *device = target_parent_device(orb->lu->tgt);
+ struct fw_device *device = target_parent_device(base_orb->lu->tgt);
int result;
if (status != NULL) {
if (STATUS_GET_DEAD(*status))
- sbp2_agent_reset_no_wait(orb->lu);
+ sbp2_agent_reset_no_wait(base_orb->lu);
switch (STATUS_GET_RESPONSE(*status)) {
case SBP2_STATUS_REQUEST_COMPLETE:
* or when sending the write (less likely).
*/
result = DID_BUS_BUSY << 16;
- sbp2_conditionally_block(orb->lu);
+ sbp2_conditionally_block(base_orb->lu);
}
dma_unmap_single(device->card->device, orb->base.request_bus,
struct sbp2_command_orb *orb;
int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
- /*
- * Bidirectional commands are not yet implemented, and unknown
- * transfer direction not handled.
- */
- if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
- dev_err(lu_dev(lu), "cannot handle bidirectional command\n");
- cmd->result = DID_ERROR << 16;
- cmd->scsi_done(cmd);
- return 0;
- }
-
orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
if (orb == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
/* Initialize rcode to something not RCODE_COMPLETE. */
orb->base.rcode = -1;
kref_init(&orb->base.kref);
- orb->lu = lu;
orb->cmd = cmd;
orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
orb->request.misc = cpu_to_be32(
.this_id = -1,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
- .cmd_per_lun = 1,
.can_queue = 1,
.sdev_attrs = sbp2_scsi_sysfs_attrs,
};