| author | Stefan Richter <stefanr@s5r6.in-berlin.de> | 2014-03-03 23:23:51 +0100 | 
|---|---|---|
| committer | Stefan Richter <stefanr@s5r6.in-berlin.de> | 2014-12-10 20:53:21 +0100 | 
| commit | d737d7da8e7e931360282199341f44ac0803c837 (patch) | |
| tree | 379cad73b30b61286933ea84e25b3492b3b4f0dd /drivers/firewire | |
| parent | 8e045a31e7c0536e4deb750b37c919fadcb44aa3 (diff) | |
| download | linux-d737d7da8e7e931360282199341f44ac0803c837.tar.bz2 | |
firewire: sbp2: replace card lock by target lock
firewire-core uses fw_card.lock to protect topology data and transaction
data.  firewire-sbp2 uses fw_card.lock for entirely unrelated purposes.
Introduce an sbp2_target.lock to firewire-sbp2 and replace all
fw_card.lock uses in the driver.  fw_card.lock is now entirely private
to firewire-core.  This has no immediate advantage apart from making it
clear in the code that firewire-sbp2 does not interact with the core
via the core lock.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Diffstat (limited to 'drivers/firewire')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/firewire/sbp2.c | 60 |
1 file changed, 29 insertions, 31 deletions
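In outline, the patch moves the lock that serializes firewire-sbp2's per-target state (the pending-ORB lists and the block/unblock counters) out of the core's fw_card and into struct sbp2_target itself: a spinlock_t lock field is added, initialized with spin_lock_init() in sbp2_probe(), and every card->lock call site in the driver switches to the target's lock. The sketch below is a reduced illustration of that pattern, not the driver source: the structures are trimmed to the fields the locking protects, and sbp2_queue_orb_sketch() is a hypothetical stand-in for the enqueue path in sbp2_send_orb() shown in the diff.

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Trimmed-down layout: the lock now lives in the target, not in fw_card. */
struct sbp2_target {
	spinlock_t lock;	/* protects dont_block, blocked, and ORB lists */
	int dont_block;
	int blocked;
};

struct sbp2_logical_unit {
	struct sbp2_target *tgt;
	struct list_head orb_list;	/* pending ORBs, guarded by tgt->lock */
};

/* Hypothetical stand-in for the enqueue pattern used by sbp2_send_orb(). */
static void sbp2_queue_orb_sketch(struct sbp2_logical_unit *lu,
				  struct list_head *orb_link)
{
	unsigned long flags;

	spin_lock_irqsave(&lu->tgt->lock, flags);	/* was &card->lock */
	list_add_tail(orb_link, &lu->orb_list);
	spin_unlock_irqrestore(&lu->tgt->lock, flags);
}
```

As the commit message says, behaviour is unchanged; the point is that fw_card.lock no longer escapes firewire-core.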
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 1f3f37a39a60..64ac8f8f5098 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -174,6 +174,7 @@ struct sbp2_target {
 	unsigned int mgt_orb_timeout;
 	unsigned int max_payload;
 
+	spinlock_t lock;
 	int dont_block;	/* counter for each logical unit */
 	int blocked;	/* ditto */
 };
@@ -270,6 +271,7 @@ struct sbp2_orb {
 	dma_addr_t request_bus;
 	int rcode;
 	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
+	struct sbp2_logical_unit *lu;
 	struct list_head link;
 };
 
@@ -321,7 +323,6 @@ struct sbp2_command_orb {
 		u8 command_block[SBP2_MAX_CDB_SIZE];
 	} request;
 	struct scsi_cmnd *cmd;
-	struct sbp2_logical_unit *lu;
 
 	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
 	dma_addr_t page_table_bus;
@@ -444,7 +445,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 	}
 
 	/* Lookup the orb corresponding to this status write. */
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irqsave(&lu->tgt->lock, flags);
 	list_for_each_entry(orb, &lu->orb_list, link) {
 		if (STATUS_GET_ORB_HIGH(status) == 0 &&
 		    STATUS_GET_ORB_LOW(status) == orb->request_bus) {
@@ -453,7 +454,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irqrestore(&lu->tgt->lock, flags);
 
 	if (&orb->link != &lu->orb_list) {
 		orb->callback(orb, &status);
@@ -480,18 +481,18 @@ static void complete_transaction(struct fw_card *card, int rcode,
 	 * been set and only does the cleanup if the transaction
 	 * failed and we didn't already get a status write.
 	 */
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irqsave(&orb->lu->tgt->lock, flags);
 
 	if (orb->rcode == -1)
 		orb->rcode = rcode;
 	if (orb->rcode != RCODE_COMPLETE) {
 		list_del(&orb->link);
-		spin_unlock_irqrestore(&card->lock, flags);
+		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
 
 		orb->callback(orb, NULL);
 		kref_put(&orb->kref, free_orb); /* orb callback reference */
 	} else {
-		spin_unlock_irqrestore(&card->lock, flags);
+		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
 	}
 
 	kref_put(&orb->kref, free_orb); /* transaction callback reference */
@@ -507,9 +508,10 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 	orb_pointer.high = 0;
 	orb_pointer.low = cpu_to_be32(orb->request_bus);
 
-	spin_lock_irqsave(&device->card->lock, flags);
+	orb->lu = lu;
+	spin_lock_irqsave(&lu->tgt->lock, flags);
 	list_add_tail(&orb->link, &lu->orb_list);
-	spin_unlock_irqrestore(&device->card->lock, flags);
+	spin_unlock_irqrestore(&lu->tgt->lock, flags);
 
 	kref_get(&orb->kref); /* transaction callback reference */
 	kref_get(&orb->kref); /* orb callback reference */
@@ -527,9 +529,9 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
 	int retval = -ENOENT;
 
 	INIT_LIST_HEAD(&list);
-	spin_lock_irq(&device->card->lock);
+	spin_lock_irq(&lu->tgt->lock);
 	list_splice_init(&lu->orb_list, &list);
-	spin_unlock_irq(&device->card->lock);
+	spin_unlock_irq(&lu->tgt->lock);
 
 	list_for_each_entry_safe(orb, next, &list, link) {
 		retval = 0;
@@ -686,14 +688,11 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
 			&d, 4, complete_agent_reset_write_no_wait, t);
 }
 
-static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+static inline void sbp2_allow_block(struct sbp2_target *tgt)
 {
-	struct sbp2_target *tgt = lu->tgt;
-	struct fw_card *card = target_parent_device(tgt)->card;
-
-	spin_lock_irq(&card->lock);
+	spin_lock_irq(&tgt->lock);
 	--tgt->dont_block;
-	spin_unlock_irq(&card->lock);
+	spin_unlock_irq(&tgt->lock);
 }
 
 /*
@@ -702,7 +701,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
  *     logical units have been finished (indicated by dont_block == 0).
  *   - lu->generation is stale.
  *
- * Note, scsi_block_requests() must be called while holding card->lock,
+ * Note, scsi_block_requests() must be called while holding tgt->lock,
  * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
 * unblock the target.
  */
@@ -714,20 +713,20 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 	unsigned long flags;
 
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irqsave(&tgt->lock, flags);
 	if (!tgt->dont_block && !lu->blocked &&
 	    lu->generation != card->generation) {
 		lu->blocked = true;
 		if (++tgt->blocked == 1)
 			scsi_block_requests(shost);
 	}
-	spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irqrestore(&tgt->lock, flags);
 }
 
 /*
  * Unblocks lu->tgt as soon as all its logical units can be unblocked.
  * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section.  On the other hand, running it inside
+ * tgt->lock protected section.  On the other hand, running it inside
  * the section might clash with shost->host_lock.
  */
 static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
@@ -738,12 +737,12 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 	bool unblock = false;
 
-	spin_lock_irq(&card->lock);
+	spin_lock_irq(&tgt->lock);
 	if (lu->blocked && lu->generation == card->generation) {
 		lu->blocked = false;
 		unblock = --tgt->blocked == 0;
 	}
-	spin_unlock_irq(&card->lock);
+	spin_unlock_irq(&tgt->lock);
 
 	if (unblock)
 		scsi_unblock_requests(shost);
@@ -752,18 +751,17 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
 /*
  * Prevents future blocking of tgt and unblocks it.
  * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section.  On the other hand, running it inside
+ * tgt->lock protected section.  On the other hand, running it inside
  * the section might clash with shost->host_lock.
  */
 static void sbp2_unblock(struct sbp2_target *tgt)
 {
-	struct fw_card *card = target_parent_device(tgt)->card;
 	struct Scsi_Host *shost =
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 
-	spin_lock_irq(&card->lock);
+	spin_lock_irq(&tgt->lock);
 	++tgt->dont_block;
-	spin_unlock_irq(&card->lock);
+	spin_unlock_irq(&tgt->lock);
 
 	scsi_unblock_requests(shost);
 }
@@ -899,7 +897,7 @@ static void sbp2_login(struct work_struct *work)
 	/* No error during __scsi_add_device() */
 	lu->has_sdev = true;
 	scsi_device_put(sdev);
-	sbp2_allow_block(lu);
+	sbp2_allow_block(tgt);
 
 	return;
 
@@ -1158,6 +1156,7 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
 	dev_set_drvdata(&unit->device, tgt);
 	tgt->unit = unit;
 	INIT_LIST_HEAD(&tgt->lu_list);
+	spin_lock_init(&tgt->lock);
 	tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
 
 	if (fw_device_enable_phys_dma(device) < 0)
@@ -1354,12 +1353,12 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
 {
 	struct sbp2_command_orb *orb =
 		container_of(base_orb, struct sbp2_command_orb, base);
-	struct fw_device *device = target_parent_device(orb->lu->tgt);
+	struct fw_device *device = target_parent_device(base_orb->lu->tgt);
 	int result;
 
 	if (status != NULL) {
 		if (STATUS_GET_DEAD(*status))
-			sbp2_agent_reset_no_wait(orb->lu);
+			sbp2_agent_reset_no_wait(base_orb->lu);
 
 		switch (STATUS_GET_RESPONSE(*status)) {
 		case SBP2_STATUS_REQUEST_COMPLETE:
@@ -1385,7 +1384,7 @@
 		 * or when sending the write (less likely).
 		 */
 		result = DID_BUS_BUSY << 16;
-		sbp2_conditionally_block(orb->lu);
+		sbp2_conditionally_block(base_orb->lu);
 	}
 
 	dma_unmap_single(device->card->device, orb->base.request_bus,
@@ -1482,7 +1481,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
 	/* Initialize rcode to something not RCODE_COMPLETE. */
 	orb->base.rcode = -1;
 	kref_init(&orb->base.kref);
-	orb->lu = lu;
 	orb->cmd = cmd;
 	orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
 	orb->request.misc = cpu_to_be32(
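One detail the diff only shows indirectly: the lu back-pointer moves from struct sbp2_command_orb into the common struct sbp2_orb. The transaction callback, complete_transaction(), is handed only the base ORB, and once fw_card.lock is gone it has to reach the new lock through the ORB itself as orb->lu->tgt->lock, while complete_command_orb() keeps recovering its containing command ORB with container_of(). The following is a reduced sketch of that relationship, not the driver source; the helper names are hypothetical and the structs are cut down to the fields involved.

```c
#include <linux/kernel.h>	/* container_of() */
#include <linux/list.h>
#include <linux/spinlock.h>

struct sbp2_target {
	spinlock_t lock;
};

struct sbp2_logical_unit {
	struct sbp2_target *tgt;
};

struct sbp2_orb {			/* common part of every ORB */
	int rcode;
	struct sbp2_logical_unit *lu;	/* moved up from sbp2_command_orb */
	struct list_head link;
};

struct sbp2_command_orb {
	struct sbp2_orb base;		/* embedded base, recovered below */
	/* ... SCSI-command-specific fields elided ... */
};

/* Hypothetical sketch of the failed-transaction cleanup in complete_transaction(). */
static void orb_cleanup_sketch(struct sbp2_orb *orb)
{
	unsigned long flags;

	spin_lock_irqsave(&orb->lu->tgt->lock, flags);
	list_del(&orb->link);		/* unlink the still-pending ORB */
	spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
}

/* How complete_command_orb() gets back to its command ORB. */
static struct sbp2_command_orb *to_command_orb_sketch(struct sbp2_orb *base_orb)
{
	return container_of(base_orb, struct sbp2_command_orb, base);
}
```

This is also why sbp2_scsi_queuecommand() drops its orb->lu = lu assignment: sbp2_send_orb() now records the logical unit on the base ORB for command and management ORBs alike.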