target: Add SCF_SCSI_TMR_CDB usage and drop se_tmr_req_cache

Change the test for whether a cmd is a TMR request to checking whether
SCF_SCSI_TMR_CDB (a new flag) is set in cmd->se_cmd_flags.

Also remove se_tmr_req_cache in favor of plain kzalloc(), make
core_tmr_alloc_req() return an int and set up se_cmd->se_tmr_req
directly, and fix up the various fabric module callers accordingly.
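
For fabric module callers the conversion is mechanical. A rough
before/after sketch (illustrative only; "fabric_tmr" and the "fail"
label are placeholders rather than code from any one fabric driver):

  /* old: core_tmr_alloc_req() returned a pointer, ERR_PTR() on failure */
  se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, fabric_tmr,
                                          TMR_LUN_RESET, GFP_KERNEL);
  if (IS_ERR(se_cmd->se_tmr_req))
          goto fail;

  /* new: returns 0 or -ENOMEM, and sets SCF_SCSI_TMR_CDB and
   * se_cmd->se_tmr_req on the command itself
   */
  rc = core_tmr_alloc_req(se_cmd, fabric_tmr, TMR_LUN_RESET, GFP_KERNEL);
  if (rc < 0)
          goto fail;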

Cc: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Author: Andy Grover, 2012-01-19 13:39:17 -08:00; committed by Nicholas Bellinger
parent 35b2cdc4fe
commit c8e31f26fe
8 changed files with 51 additions and 61 deletions

View File

@@ -1878,8 +1878,8 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
 			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
 		goto process_tmr;
 	}
-	cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
-	if (!cmd->se_tmr_req) {
+	res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
+	if (res < 0) {
 		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
 		goto process_tmr;

View File

@@ -229,6 +229,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
 {
 	struct iscsi_cmd *cmd;
 	struct se_cmd *se_cmd;
+	int rc;
 	u8 tcm_function;
 	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
@@ -286,10 +287,8 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
 		goto out;
 	}
-	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
-				cmd->tmr_req, tcm_function,
-				GFP_KERNEL);
-	if (!se_cmd->se_tmr_req)
+	rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL);
+	if (rc < 0)
 		goto out;
 	cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;

View File

@@ -187,7 +187,7 @@ static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
 	 * pointer. These will be released directly in tcm_loop_device_reset()
 	 * with transport_generic_free_cmd().
 	 */
-	if (se_cmd->se_tmr_req)
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
 		return 0;
 	/*
 	 * Release the struct se_cmd, which will make a callback to release
@@ -324,7 +324,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	struct tcm_loop_nexus *tl_nexus;
 	struct tcm_loop_tmr *tl_tmr = NULL;
 	struct tcm_loop_tpg *tl_tpg;
-	int ret = FAILED;
+	int ret = FAILED, rc;
 	/*
 	 * Locate the tcm_loop_hba_t pointer
 	 */
@@ -365,12 +365,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
 				DMA_NONE, MSG_SIMPLE_TAG,
 				&tl_cmd->tl_sense_buf[0]);
-	/*
-	 * Allocate the LUN_RESET TMR
-	 */
-	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
-				TMR_LUN_RESET, GFP_KERNEL);
-	if (IS_ERR(se_cmd->se_tmr_req))
+	rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL);
+	if (rc < 0)
 		goto release;
 	/*
 	 * Locate the underlying TCM struct se_lun from sc->device->lun

View File

@@ -40,7 +40,7 @@
 #include "target_core_alua.h"
 #include "target_core_pr.h"
-struct se_tmr_req *core_tmr_alloc_req(
+int core_tmr_alloc_req(
 	struct se_cmd *se_cmd,
 	void *fabric_tmr_ptr,
 	u8 function,
@@ -48,17 +48,20 @@ struct se_tmr_req *core_tmr_alloc_req(
 {
 	struct se_tmr_req *tmr;
-	tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags);
+	tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
 	if (!tmr) {
 		pr_err("Unable to allocate struct se_tmr_req\n");
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	}
+	se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
+	se_cmd->se_tmr_req = tmr;
 	tmr->task_cmd = se_cmd;
 	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
 	tmr->function = function;
 	INIT_LIST_HEAD(&tmr->tmr_list);
-	return tmr;
+	return 0;
 }
 EXPORT_SYMBOL(core_tmr_alloc_req);
@@ -69,7 +72,7 @@ void core_tmr_release_req(
 	unsigned long flags;
 	if (!dev) {
-		kmem_cache_free(se_tmr_req_cache, tmr);
+		kfree(tmr);
 		return;
 	}
@@ -77,7 +80,7 @@ void core_tmr_release_req(
 	list_del(&tmr->tmr_list);
 	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
-	kmem_cache_free(se_tmr_req_cache, tmr);
+	kfree(tmr);
 }
 static void core_tmr_handle_tas_abort(

View File

@@ -58,7 +58,6 @@ static int sub_api_initialized;
 static struct workqueue_struct *target_completion_wq;
 static struct kmem_cache *se_sess_cache;
-struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
 struct kmem_cache *t10_pr_reg_cache;
 struct kmem_cache *t10_alua_lu_gp_cache;
@@ -82,21 +81,13 @@ static void target_complete_ok_work(struct work_struct *work);
 int init_se_kmem_caches(void)
 {
-	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
-			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
-			0, NULL);
-	if (!se_tmr_req_cache) {
-		pr_err("kmem_cache_create() for struct se_tmr_req"
-				" failed\n");
-		goto out;
-	}
 	se_sess_cache = kmem_cache_create("se_sess_cache",
 			sizeof(struct se_session), __alignof__(struct se_session),
 			0, NULL);
 	if (!se_sess_cache) {
 		pr_err("kmem_cache_create() for struct se_session"
 				" failed\n");
-		goto out_free_tmr_req_cache;
+		goto out;
 	}
 	se_ua_cache = kmem_cache_create("se_ua_cache",
 			sizeof(struct se_ua), __alignof__(struct se_ua),
@@ -169,8 +160,6 @@ out_free_ua_cache:
 	kmem_cache_destroy(se_ua_cache);
 out_free_sess_cache:
 	kmem_cache_destroy(se_sess_cache);
-out_free_tmr_req_cache:
-	kmem_cache_destroy(se_tmr_req_cache);
 out:
 	return -ENOMEM;
 }
@@ -178,7 +167,6 @@ out:
 void release_se_kmem_caches(void)
 {
 	destroy_workqueue(target_completion_wq);
-	kmem_cache_destroy(se_tmr_req_cache);
 	kmem_cache_destroy(se_sess_cache);
 	kmem_cache_destroy(se_ua_cache);
 	kmem_cache_destroy(t10_pr_reg_cache);
@@ -553,7 +541,7 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
-	if (!cmd->se_tmr_req)
+	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
 		transport_lun_remove_cmd(cmd);
 	if (transport_cmd_check_stop_to_fabric(cmd))
@@ -3367,7 +3355,7 @@ static void transport_release_cmd(struct se_cmd *cmd)
 {
 	BUG_ON(!cmd->se_tfo);
-	if (cmd->se_tmr_req)
+	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
 		core_tmr_release_req(cmd->se_tmr_req);
 	if (cmd->t_task_cdb != cmd->__t_task_cdb)
 		kfree(cmd->t_task_cdb);
@@ -3956,7 +3944,7 @@ queue_full:
 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
-		if (wait_for_tasks && cmd->se_tmr_req)
+		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
 			transport_wait_for_tasks(cmd);
 		transport_release_cmd(cmd);
@@ -4282,7 +4270,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 	unsigned long flags;
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return false;
 	}
@@ -4290,7 +4279,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
 	 * has been set in transport_set_supported_SAM_opcode().
 	 */
-	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
+	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return false;
 	}

View File

@@ -356,6 +356,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
 	struct se_tmr_req *tmr;
 	struct fcp_cmnd *fcp;
 	struct ft_sess *sess;
+	int rc;
 	u8 tm_func;
 	transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
@@ -392,13 +393,12 @@ static void ft_send_tm(struct ft_cmd *cmd)
 	}
 	pr_debug("alloc tm cmd fn %d\n", tm_func);
-	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
-	if (!tmr) {
+	rc = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
+	if (rc < 0) {
 		pr_debug("alloc failed\n");
 		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
 		return;
 	}
-	cmd->se_cmd.se_tmr_req = tmr;
 	switch (fcp->fc_tm_flags) {
 	case FCP_TMF_LUN_RESET:

View File

@@ -169,7 +169,8 @@ enum se_cmd_flags_table {
 	SCF_EMULATED_TASK_SENSE		= 0x00000004,
 	SCF_SCSI_DATA_SG_IO_CDB		= 0x00000008,
 	SCF_SCSI_CONTROL_SG_IO_CDB	= 0x00000010,
-	SCF_SCSI_NON_DATA_CDB		= 0x00000040,
+	SCF_SCSI_NON_DATA_CDB		= 0x00000020,
+	SCF_SCSI_TMR_CDB		= 0x00000040,
 	SCF_SCSI_CDB_EXCEPTION		= 0x00000080,
 	SCF_SCSI_RESERVATION_CONFLICT	= 0x00000100,
 	SCF_FUA				= 0x00000200,
@@ -498,6 +499,24 @@ struct se_task {
 	struct completion	task_stop_comp;
 };
+struct se_tmr_req {
+	/* Task Management function to be performed */
+	u8			function;
+	/* Task Management response to send */
+	u8			response;
+	int			call_transport;
+	/* Reference to ITT that Task Mgmt should be performed */
+	u32			ref_task_tag;
+	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
+	u64			ref_task_lun;
+	void			*fabric_tmr_ptr;
+	struct se_cmd		*task_cmd;
+	struct se_cmd		*ref_cmd;
+	struct se_device	*tmr_dev;
+	struct se_lun		*tmr_lun;
+	struct list_head	tmr_list;
+};
 struct se_cmd {
 	/* SAM response code being sent to initiator */
 	u8			scsi_status;
@@ -586,24 +605,6 @@ struct se_cmd {
 };
-struct se_tmr_req {
-	/* Task Management function to be performed */
-	u8			function;
-	/* Task Management response to send */
-	u8			response;
-	int			call_transport;
-	/* Reference to ITT that Task Mgmt should be performed */
-	u32			ref_task_tag;
-	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
-	u64			ref_task_lun;
-	void			*fabric_tmr_ptr;
-	struct se_cmd		*task_cmd;
-	struct se_cmd		*ref_cmd;
-	struct se_device	*tmr_dev;
-	struct se_lun		*tmr_lun;
-	struct list_head	tmr_list;
-};
 struct se_ua {
 	u8			ua_asc;
 	u8			ua_ascq;

View File

@@ -139,7 +139,7 @@ void target_wait_for_sess_cmds(struct se_session *, int);
 int	core_alua_check_nonop_delay(struct se_cmd *);
-struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
+int	core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
 void	core_tmr_release_req(struct se_tmr_req *);
 int	transport_generic_handle_tmr(struct se_cmd *);
 int	transport_lookup_tmr_lun(struct se_cmd *, u32);