/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
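
/*
 * transport_lookup_cmd_lun():
 *
 * Translate the unpacked LUN from an incoming CDB into this session's
 * mapped struct se_lun + struct se_device, enforce READ_ONLY access,
 * update per-deve and per-device statistics, and track the command for
 * LUN shutdown.  Returns 0, or -ENODEV / -EACCES with SCSI sense state
 * set in *se_cmd on failure.
 */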
int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_device *dev;
        unsigned long flags;

        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -ENODEV;
        }

        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
        se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
        if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                struct se_dev_entry *deve = se_cmd->se_deve;

                deve->total_cmds++;
                deve->total_bytes += se_cmd->data_length;

                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
                        se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
                        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08x\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
                        return -EACCES;
                }

                if (se_cmd->data_direction == DMA_TO_DEVICE)
                        deve->write_bytes += se_cmd->data_length;
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                        deve->read_bytes += se_cmd->data_length;

                deve->deve_cmds++;

                se_lun = deve->se_lun;
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

        if (!se_lun) {
                /*
                 * Use the se_portal_group->tpg_virt_lun0 to allow for
                 * REPORT_LUNS, et al to be returned when no active
                 * MappedLUN=0 exists for this Initiator Port.
                 */
                if (unpacked_lun != 0) {
                        se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                " Access for 0x%08x\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        return -ENODEV;
                }
                /*
                 * Force WRITE PROTECT for virtual LUN 0
                 */
                if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
                    (se_cmd->data_direction != DMA_NONE)) {
                        se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
                        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        return -EACCES;
                }

                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
                se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        /*
         * Determine if the struct se_lun is online.
         * FIXME: Check for LUN_RESET + UNIT Attention
         */
        if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -ENODEV;
        }

        /* Directly associate cmd with se_dev */
        se_cmd->se_dev = se_lun->lun_se_dev;

        /* TODO: get rid of this and use atomics for stats */
        dev = se_lun->lun_se_dev;
        spin_lock_irqsave(&dev->stats_lock, flags);
        dev->num_cmds++;
        if (se_cmd->data_direction == DMA_TO_DEVICE)
                dev->write_bytes += se_cmd->data_length;
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                dev->read_bytes += se_cmd->data_length;
        spin_unlock_irqrestore(&dev->stats_lock, flags);

        /*
         * Add the se_cmd to the struct se_lun's cmd list.  This list is used
         * for tracking state of struct se_cmds during LUN shutdown events.
         */
        spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
        list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
        atomic_set(&se_cmd->transport_lun_active, 1);
        spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
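
/*
 * transport_lookup_tmr_lun():
 *
 * Same LUN translation as transport_lookup_cmd_lun(), but for task
 * management requests: associates the TMR with its target se_device
 * and adds it to that device's dev_tmr_list.
 */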
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        unsigned long flags;

        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -ENODEV;
        }

        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
        se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
        deve = se_cmd->se_deve;

        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                se_tmr->tmr_lun = deve->se_lun;
                se_cmd->se_lun = deve->se_lun;
                se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

        if (!se_lun) {
                pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                        " Access for 0x%08x\n",
                        se_cmd->se_tfo->get_fabric_name(),
                        unpacked_lun);
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -ENODEV;
        }
        /*
         * Determine if the struct se_lun is online.
         * FIXME: Check for LUN_RESET + UNIT Attention
         */
        if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -ENODEV;
        }

        /* Directly associate cmd with se_dev */
        se_cmd->se_dev = se_lun->lun_se_dev;
        se_tmr->tmr_dev = se_lun->lun_se_dev;

        spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
        list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
        spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
        struct se_node_acl *nacl,
        u16 rtpi)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_port *port;
        struct se_portal_group *tpg = nacl->se_tpg;
        u32 i;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                lun = deve->se_lun;
                if (!lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                port = lun->lun_sep;
                if (!port) {
                        pr_err("%s device entries se_port pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                if (port->sep_rtpi != rtpi)
                        continue;

                atomic_inc(&deve->pr_ref_count);
                smp_mb__after_atomic_inc();
                spin_unlock_irq(&nacl->device_list_lock);

                return deve;
        }
        spin_unlock_irq(&nacl->device_list_lock);

        return NULL;
}
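
/*
 * core_free_device_list_for_node():
 *
 * Drop every active MappedLUN entry from a NodeACL's device list and
 * free the list itself.  Called when the NodeACL is being released.
 */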
int core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        u32 i;

        if (!nacl->device_list)
                return 0;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                lun = deve->se_lun;

                spin_unlock_irq(&nacl->device_list_lock);
                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
                spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);

        kfree(nacl->device_list);
        nacl->device_list = NULL;

        return 0;
}
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
        struct se_dev_entry *deve;

        spin_lock_irq(&se_nacl->device_list_lock);
        deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
        deve->deve_cmds--;
        spin_unlock_irq(&se_nacl->device_list_lock);
}
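
/*
 * core_update_device_list_access():
 *
 * Flip an existing MappedLUN entry between READ_WRITE and READ_ONLY
 * under the NodeACL's device_list_lock.
 */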
void core_update_device_list_access(
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        spin_lock_irq(&nacl->device_list_lock);
        deve = &nacl->device_list[mapped_lun];
        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
        } else {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
        }
        spin_unlock_irq(&nacl->device_list_lock);
}
/*	core_update_device_list_for_node():
 *
 *	Enable or disable a NodeACL's MappedLUN entry (struct se_dev_entry)
 *	for the passed struct se_lun, including the demo mode -> explicit
 *	LUN ACL transition.
 */
int core_update_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg,
        int enable)
{
        struct se_port *port = lun->lun_sep;
        struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
        int trans = 0;
        /*
         * If the MappedLUN entry is being disabled, the entry in
         * port->sep_alua_list must be removed now before clearing the
         * struct se_dev_entry pointers below as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         */
        if (!enable) {
                /*
                 * deve->se_lun_acl will be NULL for demo-mode created LUNs
                 * that have not been explicitly converted to MappedLUNs ->
                 * struct se_lun_acl, but we remove deve->alua_port_list from
                 * port->sep_alua_list. This also means that active UAs and
                 * NodeACL context specific PR metadata for demo-mode
                 * MappedLUN *deve will be released below..
                 */
                spin_lock_bh(&port->sep_alua_lock);
                list_del(&deve->alua_port_list);
                spin_unlock_bh(&port->sep_alua_lock);
        }

        spin_lock_irq(&nacl->device_list_lock);
        if (enable) {
                /*
                 * Check if the call is handling demo mode -> explicit LUN ACL
                 * transition.  This transition must be for the same struct se_lun
                 * + mapped_lun that was setup in demo mode..
                 */
                if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                        if (deve->se_lun_acl != NULL) {
                                pr_err("struct se_dev_entry->se_lun_acl"
                                        " already set for demo mode -> explicit"
                                        " LUN ACL transition\n");
                                spin_unlock_irq(&nacl->device_list_lock);
                                return -EINVAL;
                        }
                        if (deve->se_lun != lun) {
                                pr_err("struct se_dev_entry->se_lun does"
                                        " not match passed struct se_lun for demo mode"
                                        " -> explicit LUN ACL transition\n");
                                spin_unlock_irq(&nacl->device_list_lock);
                                return -EINVAL;
                        }
                        deve->se_lun_acl = lun_acl;
                        trans = 1;
                } else {
                        deve->se_lun = lun;
                        deve->se_lun_acl = lun_acl;
                        deve->mapped_lun = mapped_lun;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
                }

                if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
                }

                if (trans) {
                        spin_unlock_irq(&nacl->device_list_lock);
                        return 0;
                }
                deve->creation_time = get_jiffies_64();
                deve->attach_count++;
                spin_unlock_irq(&nacl->device_list_lock);

                spin_lock_bh(&port->sep_alua_lock);
                list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
                spin_unlock_bh(&port->sep_alua_lock);

                return 0;
        }
        /*
         * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
         * PR operation to complete.
         */
        spin_unlock_irq(&nacl->device_list_lock);
        while (atomic_read(&deve->pr_ref_count) != 0)
                cpu_relax();
        spin_lock_irq(&nacl->device_list_lock);
        /*
         * Disable struct se_dev_entry LUN ACL mapping
         */
        core_scsi3_ua_release_all(deve);
        deve->se_lun = NULL;
        deve->se_lun_acl = NULL;
        deve->lun_flags = 0;
        deve->creation_time = 0;
        deve->attach_count--;
        spin_unlock_irq(&nacl->device_list_lock);

        core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
        return 0;
}
/*	core_clear_lun_from_tpg():
 *
 *	Remove a struct se_lun from every NodeACL MappedLUN entry in the
 *	passed struct se_portal_group.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;
        u32 i;

        spin_lock_irq(&tpg->acl_node_lock);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
                spin_unlock_irq(&tpg->acl_node_lock);

                spin_lock_irq(&nacl->device_list_lock);
                for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                        deve = &nacl->device_list[i];
                        if (lun != deve->se_lun)
                                continue;
                        spin_unlock_irq(&nacl->device_list_lock);

                        core_update_device_list_for_node(lun, NULL,
                                deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
                                nacl, tpg, 0);

                        spin_lock_irq(&nacl->device_list_lock);
                }
                spin_unlock_irq(&nacl->device_list_lock);

                spin_lock_irq(&tpg->acl_node_lock);
        }
        spin_unlock_irq(&tpg->acl_node_lock);
}
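
/*
 * core_alloc_port():
 *
 * Allocate a struct se_port and reserve the next free 16-bit RELATIVE
 * TARGET PORT IDENTIFIER for the passed struct se_device.
 */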
static struct se_port *core_alloc_port(struct se_device *dev)
{
        struct se_port *port, *port_tmp;

        port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
        if (!port) {
                pr_err("Unable to allocate struct se_port\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&port->sep_alua_list);
        INIT_LIST_HEAD(&port->sep_list);
        atomic_set(&port->sep_tg_pt_secondary_offline, 0);
        spin_lock_init(&port->sep_alua_lock);
        mutex_init(&port->sep_tg_pt_md_mutex);

        spin_lock(&dev->se_port_lock);
        if (dev->dev_port_count == 0x0000ffff) {
                pr_warn("Reached dev->dev_port_count =="
                                " 0x0000ffff\n");
                spin_unlock(&dev->se_port_lock);
                return ERR_PTR(-ENOSPC);
        }
again:
        /*
         * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
         * Here is the table from spc4r17 section 7.7.3.8.
         *
         *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
         *
         * 0h            Reserved
         * 1h            Relative port 1, historically known as port A
         * 2h            Relative port 2, historically known as port B
         * 3h to FFFFh   Relative port 3 through 65 535
         */
        port->sep_rtpi = dev->dev_rpti_counter++;
        if (!port->sep_rtpi)
                goto again;

        list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
                /*
                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                 * for 16-bit wrap..
                 */
                if (port->sep_rtpi == port_tmp->sep_rtpi)
                        goto again;
        }
        spin_unlock(&dev->se_port_lock);

        return port;
}
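
/*
 * core_export_port():
 *
 * Bind an allocated se_port to its TPG and LUN, and attach it to the
 * device's default ALUA target port group when SPC-3 ALUA is emulated.
 */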
static void core_export_port(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_port *port,
        struct se_lun *lun)
{
        struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

        spin_lock(&dev->se_port_lock);
        spin_lock(&lun->lun_sep_lock);
        port->sep_tpg = tpg;
        port->sep_lun = lun;
        lun->lun_sep = port;
        spin_unlock(&lun->lun_sep_lock);

        list_add_tail(&port->sep_list, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);

        if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
                        pr_err("Unable to allocate t10_alua_tg_pt"
                                        "_gp_member_t\n");
                        return;
                }
                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
                        su_dev->t10_alua.default_tg_pt_gp);
                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_debug("%s/%s: Adding to default ALUA Target Port"
                        " Group: alua/default_tg_pt_gp\n",
                        dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
        }

        dev->dev_port_count++;
        port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
        __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
        /*
         * Wait for any port reference for PR ALL_TG_PT=1 operation
         * to complete in __core_scsi3_alloc_registration()
         */
        spin_unlock(&dev->se_port_lock);
        if (atomic_read(&port->sep_tg_pt_ref_cnt))
                cpu_relax();
        spin_lock(&dev->se_port_lock);

        core_alua_free_tg_pt_gp_mem(port);

        list_del(&port->sep_list);
        dev->dev_port_count--;
        kfree(port);
}
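
/*
 * core_dev_export():
 *
 * Export a struct se_device as a LUN within a target portal group,
 * allocating and wiring up the backing struct se_port.
 */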
int core_dev_export(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_port *port;

        port = core_alloc_port(dev);
        if (IS_ERR(port))
                return PTR_ERR(port);

        lun->lun_se_dev = dev;
        se_dev_start(dev);

        atomic_inc(&dev->dev_export_obj.obj_access_count);
        core_export_port(dev, tpg, port, lun);
        return 0;
}
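
/*
 * core_dev_unexport():
 *
 * Reverse of core_dev_export(): release the LUN's se_port and drop the
 * device export reference.
 */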
void core_dev_unexport(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_port *port = lun->lun_sep;

        spin_lock(&lun->lun_sep_lock);
        if (lun->lun_se_dev == NULL) {
                spin_unlock(&lun->lun_sep_lock);
                return;
        }
        spin_unlock(&lun->lun_sep_lock);

        spin_lock(&dev->se_port_lock);
        atomic_dec(&dev->dev_export_obj.obj_access_count);
        core_release_port(dev, port);
        spin_unlock(&dev->se_port_lock);

        se_dev_stop(dev);
        lun->lun_se_dev = NULL;
}
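
/*
 * transport_core_report_lun_response():
 *
 * Emulate the REPORT LUNS response payload from the session's NodeACL
 * device list, reporting only LUN 0 for target_core_mod passthrough ops
 * that carry no struct se_session.
 */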
int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_task *se_task;
        unsigned char *buf;
        u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

        list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
                break;

        if (!se_task) {
                pr_err("Unable to locate struct se_task for struct se_cmd\n");
                return PYX_TRANSPORT_LU_COMM_FAILURE;
        }

        buf = transport_kmap_first_data_page(se_cmd);

        /*
         * If no struct se_session pointer is present, this struct se_cmd is
         * coming via a target_core_mod PASSTHROUGH op, and not through
         * a $FABRIC_MOD.  In that case, report LUN=0 only.
         */
        if (!se_sess) {
                int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
                lun_count = 1;
                goto done;
        }

        spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &se_sess->se_node_acl->device_list[i];
                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;
                se_lun = deve->se_lun;
                /*
                 * We determine the correct LUN LIST LENGTH even once we
                 * have reached the initial allocation length.
                 * See SPC2-R20 7.19.
                 */
                lun_count++;
                if ((cdb_offset + 8) >= se_cmd->data_length)
                        continue;

                int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
                offset += 8;
                cdb_offset += 8;
        }
        spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

        /*
         * See SPC3 r07, page 159.
         */
done:
        lun_count *= 8;
        buf[0] = ((lun_count >> 24) & 0xff);
        buf[1] = ((lun_count >> 16) & 0xff);
        buf[2] = ((lun_count >> 8) & 0xff);
        buf[3] = (lun_count & 0xff);
        /* Unmap only after the LUN LIST LENGTH header has been filled in */
        transport_kunmap_first_data_page(se_cmd);

        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*	se_release_device_for_hba():
 *
 *	Stop the device's processing thread, release the subsystem plugin
 *	state, and free the struct se_device from its struct se_hba.
 */
void se_release_device_for_hba(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
            (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
            (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
            (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
            (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
                se_dev_stop(dev);

        if (dev->dev_ptr) {
                kthread_stop(dev->process_thread);
                if (dev->transport->free_device)
                        dev->transport->free_device(dev->dev_ptr);
        }

        spin_lock(&hba->device_lock);
        list_del(&dev->dev_list);
        hba->dev_count--;
        spin_unlock(&hba->device_lock);

        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);

        kfree(dev);
}
void se_release_vpd_for_dev(struct se_device *dev)
{
        struct t10_vpd *vpd, *vpd_tmp;

        spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
                        &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
        spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}
/*	se_free_virtual_device():
 *
 *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
        if (!list_empty(&dev->dev_sep_list))
                dump_stack();

        core_alua_free_lu_gp_mem(dev);
        se_release_device_for_hba(dev);

        return 0;
}
static void se_dev_start(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        spin_lock(&hba->device_lock);
        atomic_inc(&dev->dev_obj.obj_access_count);
        if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
                if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
                        dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
                } else if (dev->dev_status &
                           TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
                        dev->dev_status &=
                                ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
                }
        }
        spin_unlock(&hba->device_lock);
}
static void se_dev_stop(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        spin_lock(&hba->device_lock);
        atomic_dec(&dev->dev_obj.obj_access_count);
        if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
                if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
                        dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
                } else if (dev->dev_status &
                           TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
                        dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
                }
        }
        spin_unlock(&hba->device_lock);
}
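
/*
 * se_dev_check_online():
 *
 * Returns 0 when dev_status contains ACTIVATED or DEACTIVATED,
 * otherwise 1.
 */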
int se_dev_check_online(struct se_device *dev)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->dev_status_lock, flags);
        ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
               (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
        spin_unlock_irqrestore(&dev->dev_status_lock, flags);

        return ret;
}
int se_dev_check_shutdown(struct se_device *dev)
{
        int ret;

        spin_lock_irq(&dev->dev_status_lock);
        ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
        spin_unlock_irq(&dev->dev_status_lock);

        return ret;
}
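
/*
 * se_dev_align_max_sectors():
 *
 * Round max_sectors down so that (max_sectors * block_size) stays
 * PAGE_SIZE aligned for transport_allocate_data_tasks().
 */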
u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
        u32 tmp, aligned_max_sectors;
        /*
         * Limit max_sectors to a PAGE_SIZE aligned value for modern
         * transport_allocate_data_tasks() operation.
         */
        tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
        aligned_max_sectors = (tmp / block_size);
        if (max_sectors != aligned_max_sectors) {
                pr_info("Rounding down aligned max_sectors from %u"
                        " to %u\n", max_sectors, aligned_max_sectors);
                return aligned_max_sectors;
        }
        return max_sectors;
}
void se_dev_set_default_attribs(
        struct se_device *dev,
        struct se_dev_limits *dev_limits)
{
        struct queue_limits *limits = &dev_limits->limits;

        dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
        dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
        dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
        dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
        dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
        dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
        dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
        dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
        dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
        dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
        dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
        /*
         * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
         * iblock_create_virtdevice() from struct queue_limits values
         * if blk_queue_discard()==1
         */
        dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
        dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
        dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
        dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        /*
         * block_size is based on subsystem plugin dependent requirements.
         */
        dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
        dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
        /*
         * max_sectors is based on subsystem plugin dependent requirements.
         */
        dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
        /*
         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
         */
        limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
                limits->logical_block_size);
        dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
        /*
         * Set optimal_sectors from max_sectors, which can be lowered via
         * configfs.
         */
        dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
        /*
         * queue_depth is based on subsystem plugin dependent requirements.
         */
        dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
        dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
int se_dev_set_max_unmap_lba_count(
        struct se_device *dev,
        u32 max_unmap_lba_count)
{
        dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
        pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
                        dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
        return 0;
}
int se_dev_set_max_unmap_block_desc_count(
        struct se_device *dev,
        u32 max_unmap_block_desc_count)
{
        dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
                max_unmap_block_desc_count;
        pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
                        dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
        return 0;
}
int se_dev_set_unmap_granularity(
        struct se_device *dev,
        u32 unmap_granularity)
{
        dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
        pr_debug("dev[%p]: Set unmap_granularity: %u\n",
                        dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
        return 0;
}
int se_dev_set_unmap_granularity_alignment(
        struct se_device *dev,
        u32 unmap_granularity_alignment)
{
        dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
        pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
                        dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
        return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        pr_err("dpo_emulated not supported\n");

        return -EINVAL;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (dev->transport->fua_write_emulated == 0) {
                pr_err("fua_write_emulated not supported\n");
                return -EINVAL;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
        pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
                        dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
        return 0;
}
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        pr_err("fua_read_emulated not supported\n");

        return -EINVAL;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        if (dev->transport->write_cache_emulated == 0) {
                pr_err("write_cache_emulated not supported\n");
                return -EINVAL;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
        pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
                        dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
        return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1) && (flag != 2)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                pr_err("dev[%p]: Unable to change SE Device"
                        " UA_INTRLCK_CTRL while dev_export_obj: %d count"
                        " exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -EINVAL;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
        pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
                dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);

        return 0;
}
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                pr_err("dev[%p]: Unable to change SE Device TAS while"
                        " dev_export_obj: %d count exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -EINVAL;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
        pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
                dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

        return 0;
}
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
        if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }

        dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
        pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
                                dev, flag);
        return 0;
}
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
        if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }

        dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
        pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
                                dev, flag);
        return 0;
}
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
        pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
                (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");

        return 0;
}
int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
        pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
                        dev, flag);
        return 0;
}
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
        if (flag != 0) {
                pr_err("dev[%p]: SE Device emulation of restricted"
                        " reordering not implemented\n", dev);
                return -ENOSYS;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
        pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);

        return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
        u32 orig_queue_depth = dev->queue_depth;

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                pr_err("dev[%p]: Unable to change SE Device TCQ while"
                        " dev_export_obj: %d count exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -EINVAL;
        }
        if (!queue_depth) {
                pr_err("dev[%p]: Illegal ZERO value for queue"
                        "_depth\n", dev);
                return -EINVAL;
        }

        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
                        pr_err("dev[%p]: Passed queue_depth: %u"
                                " exceeds TCM/SE_Device TCQ: %u\n",
                                dev, queue_depth,
                                dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
                        return -EINVAL;
                }
        } else {
                if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
                        if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
                                pr_err("dev[%p]: Passed queue_depth:"
                                        " %u exceeds TCM/SE_Device MAX"
                                        " TCQ: %u\n", dev, queue_depth,
                                        dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
                                return -EINVAL;
                        }
                }
        }

        dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
        if (queue_depth > orig_queue_depth)
                atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
        else if (queue_depth < orig_queue_depth)
                atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

        pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
                        dev, queue_depth);
        return 0;
}
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
        int force = 0; /* Force setting for VDEVS */

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                pr_err("dev[%p]: Unable to change SE Device"
                        " max_sectors while dev_export_obj: %d count exists\n",
                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
                return -EINVAL;
        }
        if (!max_sectors) {
                pr_err("dev[%p]: Illegal ZERO value for"
                        " max_sectors\n", dev);
                return -EINVAL;
        }
        if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
                pr_err("dev[%p]: Passed max_sectors: %u less than"
                        " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
                        DA_STATUS_MAX_SECTORS_MIN);
                return -EINVAL;
        }
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
                        pr_err("dev[%p]: Passed max_sectors: %u"
                                " greater than TCM/SE_Device max_sectors:"
                                " %u\n", dev, max_sectors,
                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
                        return -EINVAL;
                }
        } else {
                if (!force && (max_sectors >
                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
                        pr_err("dev[%p]: Passed max_sectors: %u"
                                " greater than TCM/SE_Device max_sectors"
                                ": %u, use force=1 to override.\n", dev,
                                max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
                        return -EINVAL;
                }
                if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
                        pr_err("dev[%p]: Passed max_sectors: %u"
                                " greater than DA_STATUS_MAX_SECTORS_MAX:"
                                " %u\n", dev, max_sectors,
                                DA_STATUS_MAX_SECTORS_MAX);
                        return -EINVAL;
                }
        }
        /*
         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
         */
        max_sectors = se_dev_align_max_sectors(max_sectors,
                                dev->se_sub_dev->se_dev_attrib.block_size);

        dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
        pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
                        dev, max_sectors);
        return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                pr_err("dev[%p]: Unable to change SE Device"
                        " optimal_sectors while dev_export_obj: %d count exists\n",
                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
                return -EINVAL;
        }
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                pr_err("dev[%p]: Passed optimal_sectors cannot be"
                        " changed for TCM/pSCSI\n", dev);
                return -EINVAL;
        }
        if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
                        " greater than max_sectors: %u\n", dev,
                        optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
                return -EINVAL;
        }

        dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
        pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
                        dev, optimal_sectors);

        return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                pr_err("dev[%p]: Unable to change SE Device block_size"
                        " while dev_export_obj: %d count exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -EINVAL;
        }

        if ((block_size != 512) &&
            (block_size != 1024) &&
            (block_size != 2048) &&
            (block_size != 4096)) {
                pr_err("dev[%p]: Illegal value for block_size: %u"
                        " for SE device, must be 512, 1024, 2048 or 4096\n",
                        dev, block_size);
                return -EINVAL;
        }

        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                pr_err("dev[%p]: Not allowed to change block_size for"
                        " Physical Device, use for Linux/SCSI to change"
                        " block_size for underlying hardware\n", dev);
                return -EINVAL;
        }

        dev->se_sub_dev->se_dev_attrib.block_size = block_size;
        pr_debug("dev[%p]: SE Device block_size changed to %u\n",
                        dev, block_size);
        return 0;
}
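
/*
 * core_dev_add_lun():
 *
 * Activate a struct se_device as a LUN within a TPG, and update the
 * LUN maps of dynamically generated NodeACLs in demo mode.
 */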
struct se_lun *core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_hba *hba,
        struct se_device *dev,
        u32 lun)
{
        struct se_lun *lun_p;
        u32 lun_access = 0;

        if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
                pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
                        atomic_read(&dev->dev_access_obj.obj_access_count));
                return NULL;
        }

        lun_p = core_tpg_pre_addlun(tpg, lun);
        if ((IS_ERR(lun_p)) || !lun_p)
                return NULL;

        if (dev->dev_flags & DF_READ_ONLY)
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        else
                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

        if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
                return NULL;

        pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
        /*
         * Update LUN maps for dynamically added initiators when
         * generate_node_acl is enabled.
         */
        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;
                spin_lock_irq(&tpg->acl_node_lock);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                        if (acl->dynamic_node_acl &&
                            (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
                             !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
                                spin_unlock_irq(&tpg->acl_node_lock);
                                core_tpg_add_node_to_devs(acl, tpg);
                                spin_lock_irq(&tpg->acl_node_lock);
                        }
                }
                spin_unlock_irq(&tpg->acl_node_lock);
        }

        return lun_p;
}
/*	core_dev_del_lun():
 *
 *	Deactivate a struct se_lun from its struct se_portal_group.
 */
int core_dev_del_lun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;
        int ret = 0;

        lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
        if (!lun)
                return ret;

        core_tpg_post_dellun(tpg, lun);

        pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
                " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name());

        return 0;
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
                        "_PER_TPG-1: %u for Target Portal Group: %hu\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = &tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
                pr_err("%s Logical Unit Number: %u is not free on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}
/*	core_dev_get_lun():
 *
 *	Return the struct se_lun for unpacked_lun only if it is currently
 *	TRANSPORT_LUN_STATUS_ACTIVE within the passed TPG.
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
                        "_TPG-1: %u for Target Portal Group: %hu\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = &tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}
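
/*
 * core_dev_init_initiator_node_lun_acl():
 *
 * Allocate and initialize a struct se_lun_acl for an existing NodeACL,
 * returning NULL and an error code in *ret on failure.
 */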
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        u32 mapped_lun,
        char *initiatorname,
        int *ret)
{
        struct se_lun_acl *lacl;
        struct se_node_acl *nacl;

        if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
                pr_err("%s InitiatorName exceeds maximum size.\n",
                        tpg->se_tpg_tfo->get_fabric_name());
                *ret = -EOVERFLOW;
                return NULL;
        }
        nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!nacl) {
                *ret = -EINVAL;
                return NULL;
        }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!lacl) {
                pr_err("Unable to allocate memory for struct se_lun_acl.\n");
                *ret = -ENOMEM;
                return NULL;
        }

        INIT_LIST_HEAD(&lacl->lacl_list);
        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;
        snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

        return lacl;
}
int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
        u32 unpacked_lun,
        u32 lun_access)
{
        struct se_lun *lun;
        struct se_node_acl *nacl;

        lun = core_dev_get_lun(tpg, unpacked_lun);
        if (!lun) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return -EINVAL;
        }

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
            (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

        lacl->se_lun = lun;

        if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
                        lun_access, nacl, tpg, 1) < 0)
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
        atomic_inc(&lun->lun_acl_count);
        smp_mb__after_atomic_inc();
        spin_unlock(&lun->lun_acl_lock);

        pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
                " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
                lacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL..
         */
        core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);

        return 0;
}
/*	core_dev_del_initiator_node_lun_acl():
 *
 *	Remove an existing MappedLUN ACL from the passed struct se_lun.
 */
int core_dev_del_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        struct se_lun_acl *lacl)
{
        struct se_node_acl *nacl;

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_del(&lacl->lacl_list);
        atomic_dec(&lun->lun_acl_count);
        smp_mb__after_atomic_dec();
        spin_unlock(&lun->lun_acl_lock);

        core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
                TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

        lacl->se_lun = NULL;

        pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
                " InitiatorNode: %s Mapped LUN: %u\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                lacl->initiatorname, lacl->mapped_lun);

        return 0;
}
void core_dev_free_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl)
{
        pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
                " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                tpg->se_tpg_tfo->get_fabric_name(),
                lacl->initiatorname, lacl->mapped_lun);

        kfree(lacl);
}
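
/*
 * core_dev_setup_virtual_lun0():
 *
 * Create the global rd_mcp backed virtual LUN 0 device (g_lun0_dev)
 * used to answer REPORT LUNS when an initiator has no MappedLUN=0.
 */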
int core_dev_setup_virtual_lun0(void)
{
        struct se_hba *hba;
        struct se_device *dev;
        struct se_subsystem_dev *se_dev = NULL;
        struct se_subsystem_api *t;
        char buf[16];
        int ret;

        hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
        if (IS_ERR(hba))
                return PTR_ERR(hba);

        lun0_hba = hba;
        t = hba->transport;

        se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
        if (!se_dev) {
                pr_err("Unable to allocate memory for"
                                " struct se_subsystem_dev\n");
                ret = -ENOMEM;
                goto out;
        }
        INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
        INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
        spin_lock_init(&se_dev->t10_pr.registration_lock);
        spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
        INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
        spin_lock_init(&se_dev->se_dev_lock);
        se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
        se_dev->t10_wwn.t10_sub_dev = se_dev;
        se_dev->t10_alua.t10_sub_dev = se_dev;
        se_dev->se_dev_attrib.da_sub_dev = se_dev;
        se_dev->se_dev_hba = hba;

        se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
        if (!se_dev->se_dev_su_ptr) {
                pr_err("Unable to locate subsystem dependent pointer"
                        " from allocate_virtdevice()\n");
                ret = -ENOMEM;
                goto out;
        }
        lun0_su_dev = se_dev;

        memset(buf, 0, 16);
        sprintf(buf, "rd_pages=8");
        t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

        dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto out;
        }
        se_dev->se_dev_ptr = dev;
        g_lun0_dev = dev;

        return 0;
out:
        lun0_su_dev = NULL;
        kfree(se_dev);
        if (lun0_hba) {
                core_delete_hba(lun0_hba);
                lun0_hba = NULL;
        }
        return ret;
}
void core_dev_release_virtual_lun0(void)
{
        struct se_hba *hba = lun0_hba;
        struct se_subsystem_dev *su_dev = lun0_su_dev;

        if (!hba)
                return;

        if (g_lun0_dev)
                se_free_virtual_device(g_lun0_dev, hba);

        kfree(su_dev);
        core_delete_hba(hba);
}