/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
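
/*
 * Resolve unpacked_lun to a struct se_lun for an incoming I/O command,
 * checking the session's LUN access flags and falling back to the TPG's
 * virtual LUN 0 so that REPORT_LUNS, et al still work when no active
 * MappedLUN=0 exists for this initiator port.
 */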
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
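
/*
 * Resolve unpacked_lun for an incoming task management request and link
 * the se_tmr_req onto the device's dev_tmr_list. Returns -ENODEV when no
 * MappedLUN with initiator access exists.
 */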
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}
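
/*
 * Flip an existing mapped LUN between READ-WRITE and READ-ONLY access
 * under device_list_lock. The mapped_lun entry must already exist in
 * nacl->device_list.
 */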
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*      core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
				" not match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}
/*      core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}
/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}
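
/*
 * Allocate a struct se_port and assign it the next free RELATIVE TARGET
 * PORT IDENTIFIER, skipping zero and retrying on 16-bit wrap until the
 * value is unique across dev->dev_sep_list.
 */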
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count == 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
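
/*
 * Wire the new port up to its TPG and LUN, publish it on
 * dev->dev_sep_list, and attach it to the device's default ALUA target
 * port group for non-pSCSI, non-internal backends.
 */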
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *  Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_sep = NULL;
	lun->lun_se_dev = NULL;
}
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
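
/*
 * Round max_sectors down to a multiple of (PAGE_SIZE / block_size).
 * For example, with 4k pages and 512-byte blocks the alignment is 8,
 * so a max_sectors of 1029 is rounded down to 1024.
 */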
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
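
/*
 * The se_dev_set_*() helpers below back the per-device configfs
 * attributes; each one validates its input, updates dev->dev_attrib,
 * and logs the new value.
 */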
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
			dev, dev->dev_attrib.max_write_same_len);
	return 0;
}
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}
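
/*
 * When enabled, expose the backstore's configfs name (truncated to the
 * 16-byte INQUIRY MODEL field) instead of the backend's default product
 * string. Only allowed while the device is not exported.
 */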
int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo_emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("ua read emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}
int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
		dev, flag);
	return 0;
}
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	int block_size = dev->dev_attrib.block_size;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	if (!block_size) {
		block_size = 512;
		pr_warn("Defaulting to 512 for zero block_size\n");
	}
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}
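
/*
 * Activate unpacked LUN 'lun' for 'dev' within 'tpg' with READ-WRITE
 * access, and refresh the LUN maps of any dynamically generated node
 * ACLs when the fabric runs in demo mode.
 */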
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	int rc;

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	rc = core_tpg_post_addlun(tpg, lun_p,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}
/*      core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
/*      core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
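
/*
 * Allocate a struct se_lun_acl for mapped_lun on behalf of nacl. The
 * returned ACL is not yet linked to a LUN; that happens in
 * core_dev_add_initiator_node_lun_acl().
 */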
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}
/*      core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
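
/*
 * Ask the backend to allocate a struct se_device, then initialize the
 * generic lists, locks, and default attribute values before the device
 * is configured.
 */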
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->stats_lock);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);

	dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

	return dev;
}
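
/*
 * Second-stage device setup: let the backend configure itself, derive
 * block_size/queue_depth from the reported hardware values, set up ALUA
 * and the tmr workqueue, and preload INQUIRY strings for virtual
 * backends.
 */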
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	dev->transport->free_device(dev);
}
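
/*
 * Create the internal rd_mcp HBA and "virt_lun0" device backing
 * se_portal_group->tpg_virt_lun0, so a LUN 0 always exists for
 * REPORT LUNS processing.
 */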
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}