29dfdf66bbc520cca8f14f3c7bcbd34e2ff955e4
[firefly-linux-kernel-4.4.55.git] / drivers/vhost/scsi.c
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include <linux/percpu_ida.h>

#include "vhost.h"

#define TCM_VHOST_VERSION  "v0.1"
#define TCM_VHOST_NAMELEN 256
#define TCM_VHOST_MAX_CDB_SIZE 32
#define TCM_VHOST_DEFAULT_TAGS 256
#define TCM_VHOST_PREALLOC_SGLS 2048
#define TCM_VHOST_PREALLOC_UPAGES 2048
#define TCM_VHOST_PREALLOC_PROT_SGLS 512

struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct tcm_vhost_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
        /* virtio-scsi response incoming iovecs */
        int tvc_in_iovs;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
        u32 tvc_exp_data_len;
        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
        u64 tvc_tag;
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
        /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
        /* Pointer to response header iovec */
        struct iovec *tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
        struct tcm_vhost_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
        /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
        unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
        struct llist_node tvc_completion_list;
        /* Used to track inflight cmd */
        struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
        /* Binary World Wide unique Port Name for Vhost Initiator port */
        u64 iport_wwpn;
        /* ASCII formatted WWPN for SAS Initiator port */
        char iport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_nodeacl() */
        struct se_node_acl se_node_acl;
};

struct tcm_vhost_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track the number of TPG Port/LUN links with regard to explicit I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
        /* list for tcm_vhost_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
        struct tcm_vhost_nexus *tpg_nexus;
        /* Pointer back to tcm_vhost_tport */
        struct tcm_vhost_tport *tport;
        /* Returned by tcm_vhost_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
        char tport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_tport() */
        struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
        struct llist_node list;
};

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
                                               (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for flush operation. At
         * any given time one slot counts newly submitted commands, while we
         * wait for the other slot's refcount to reach 0.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicate current inflight in use, protected by vq->mutex.
         * Writers must also take dev mutex and flush under it.
         */
        int inflight_idx;
};

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct tcm_vhost_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

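/*
 * Worked example for the page math below (assuming PAGE_SIZE == 4096):
 * an iovec with iov_base = 0x1ffc and iov_len = 8 ends at 0x2004, so
 * PAGE_ALIGN(0x2004) = 0x3000 and (0x1ffc & PAGE_MASK) = 0x1000, giving
 * (0x3000 - 0x1000) >> PAGE_SHIFT = 2 pages even though only 8 bytes
 * are covered.
 */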
static int iov_num_pages(struct iovec *iov)
{
        return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
               ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

static void tcm_vhost_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

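/*
 * Flush bookkeeping: each vq owns two vhost_scsi_inflight counters, and
 * inflight_idx selects the active one.  A flush flips the index so that
 * new commands take references on the fresh counter, drops the initial
 * kref on the old one, and then waits on old->comp, which fires from
 * tcm_vhost_done_inflight() once the last request issued before the
 * flip has completed.
 */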
static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* store old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* setup new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
        return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_FCP:
                return fc_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_fabric_proto_ident(se_tpg);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32
tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
                              struct se_node_acl *se_nacl,
                              struct t10_pr_registration *pr_reg,
                              int *format_code,
                              unsigned char *buf)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                        format_code, buf);
}

static u32
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
                                  struct se_node_acl *se_nacl,
                                  struct t10_pr_registration *pr_reg,
                                  int *format_code)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                        format_code);
}

static char *
tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
                                    const char *buf,
                                    u32 *out_tid_len,
                                    char **port_nexus_ptr)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_FCP:
                return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                        port_nexus_ptr);
}

static struct se_node_acl *
tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_nacl *nacl;

        nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
        if (!nacl) {
                pr_err("Unable to allocate struct tcm_vhost_nacl\n");
                return NULL;
        }

        return &nacl->se_node_acl;
}

static void
tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
                             struct se_node_acl *se_nacl)
{
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
                        struct tcm_vhost_nacl, se_node_acl);
        kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        struct se_session *se_sess = se_cmd->se_sess;
        int i;

        if (tv_cmd->tvc_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
        }
        if (tv_cmd->tvc_prot_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
        }

        tcm_vhost_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
        return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
        return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
        return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
{
        struct vhost_scsi *vs = cmd->tvc_vhost;

        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
        return;
}

static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
{
        return;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

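/*
 * Event allocation is capped at VHOST_SCSI_MAX_EVENT outstanding events;
 * past that point (or on allocation failure) vs_events_missed is set so
 * the next event delivered to the guest carries
 * VIRTIO_SCSI_T_EVENTS_MISSED, telling it to rescan for lost hotplug
 * notifications.
 */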
static struct tcm_vhost_evt *
tcm_vhost_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;

        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = cpu_to_vhost32(vq, event);
        evt->event.reason = cpu_to_vhost32(vq, reason);
        vs->vs_events_nr++;

        return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
{
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 0);

}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
        return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static void
tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vq->private_data) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
                evt = llist_entry(llnode, struct tcm_vhost_evt, list);
                llnode = llist_next(llnode);
                tcm_vhost_do_evt_work(vs, evt);
                tcm_vhost_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct tcm_vhost_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        struct iov_iter iov_iter;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
                cmd = llist_entry(llnode, struct tcm_vhost_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
                                                 se_cmd->scsi_sense_length);
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);

                iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
                              cmd->tvc_in_iovs, sizeof(v_rsp));
                ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
                if (likely(ret == sizeof(v_rsp))) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");

                vhost_scsi_free_cmd(cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

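/*
 * Command descriptors live in the session's preallocated sess_cmd_map and
 * are claimed via a percpu_ida tag.  Note how the memset() below is
 * bracketed by saving and restoring tvc_sgl/tvc_prot_sgl/tvc_upages: those
 * arrays were preallocated per tag at nexus creation and must survive
 * reuse of the descriptor.
 */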
static struct tcm_vhost_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
{
        struct tcm_vhost_cmd *cmd;
        struct tcm_vhost_nexus *tv_nexus;
        struct se_session *se_sess;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
        int tag;

        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct tcm_vhost_nexus\n");
                return ERR_PTR(-EIO);
        }
        se_sess = tv_nexus->tvn_se_sess;

        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
                pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
        }

        cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
        memset(cmd, 0, sizeof(struct tcm_vhost_cmd));

        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
        cmd->tvc_upages = pages;
        cmd->tvc_se_cmd.map_tag = tag;
        cmd->tvc_tag = scsi_tag;
        cmd->tvc_lun = lun;
        cmd->tvc_task_attr = task_attr;
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
        cmd->inflight = tcm_vhost_get_inflight(vq);

        memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);

        return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
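/*
 * The pages are pinned here with get_user_pages_fast() and stay pinned
 * for the life of the command; tcm_vhost_release_cmd() drops them with
 * put_page().  A short pin (fewer pages than requested) is unwound
 * immediately and reported as -EFAULT.
 */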
static int
vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
                      struct scatterlist *sgl,
                      unsigned int sgl_count,
                      struct iovec *iov,
                      struct page **pages,
                      bool write)
{
        unsigned int npages = 0, pages_nr, offset, nbytes;
        struct scatterlist *sg = sgl;
        void __user *ptr = iov->iov_base;
        size_t len = iov->iov_len;
        int ret, i;

        pages_nr = iov_num_pages(iov);
        if (pages_nr > sgl_count) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
                       " sgl_count: %u\n", pages_nr, sgl_count);
                return -ENOBUFS;
        }
        if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
                       " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
                        pages_nr, TCM_VHOST_PREALLOC_UPAGES);
                return -ENOBUFS;
        }

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than requested */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        return ret;
}

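/*
 * Build the data scatterlist for a command: the per-iovec page counts are
 * summed first so the whole transfer can be validated against the
 * preallocated TCM_VHOST_PREALLOC_SGLS table, then each iovec is mapped in
 * turn.  On any mapping failure every page pinned so far is released and
 * tvc_sgl_count is reset so the release path does not double-put.
 */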
static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
                          struct iovec *iov,
                          int niov,
                          bool write)
{
        struct scatterlist *sg = cmd->tvc_sgl;
        unsigned int sgl_count = 0;
        int ret, i;

        for (i = 0; i < niov; i++)
                sgl_count += iov_num_pages(&iov[i]);

        if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
                pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
                        " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
                        sgl_count, TCM_VHOST_PREALLOC_SGLS);
                return -ENOBUFS;
        }

        pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
        sg_init_table(sg, sgl_count);
        cmd->tvc_sgl_count = sgl_count;

        pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);

        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
                                            cmd->tvc_upages, write);
                if (ret < 0) {
                        for (i = 0; i < cmd->tvc_sgl_count; i++)
                                put_page(sg_page(&cmd->tvc_sgl[i]));

                        cmd->tvc_sgl_count = 0;
                        return ret;
                }
                sg += ret;
                sgl_count -= ret;
        }
        return 0;
}

static int
vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
                           struct iovec *iov,
                           int niov,
                           bool write)
{
        struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
        unsigned int prot_sgl_count = 0;
        int ret, i;

        for (i = 0; i < niov; i++)
                prot_sgl_count += iov_num_pages(&iov[i]);

        if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
                pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
                        " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
                        prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
                return -ENOBUFS;
        }

        pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
                 prot_sg, prot_sgl_count);
        sg_init_table(prot_sg, prot_sgl_count);
        cmd->tvc_prot_sgl_count = prot_sgl_count;

        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
                                            cmd->tvc_upages, write);
                if (ret < 0) {
                        for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
                                put_page(sg_page(&cmd->tvc_prot_sgl[i]));

                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
                prot_sg += ret;
                prot_sgl_count -= ret;
        }
        return 0;
}

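/*
 * Runs in process context on the cmwq worker that vhost_scsi_handle_vq()
 * queued this command to, and hands it to TCM core via
 * target_submit_cmd_map_sgls() with the SGLs built from the guest iovecs.
 * On submission failure a CHECK_CONDITION with LUN communication failure
 * sense is returned to the guest.
 */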
static void tcm_vhost_submission_work(struct work_struct *work)
{
        struct tcm_vhost_cmd *cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
        int rc;

        /* FIXME: BIDI operation */
        if (cmd->tvc_sgl_count) {
                sg_ptr = cmd->tvc_sgl;

                if (cmd->tvc_prot_sgl_count)
                        sg_prot_ptr = cmd->tvc_prot_sgl;
                else
                        se_cmd->prot_pto = true;
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = cmd->tvc_nexus;

        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        cmd->tvc_task_attr, cmd->tvc_data_direction,
                        TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
                        NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                           struct vhost_virtqueue *vq,
                           int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
        struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_cmd *cmd;
        u64 tag;
        u32 exp_data_len, data_first, data_num, data_direction, prot_first;
        unsigned out, in, i;
        int head, ret, data_niov, prot_niov, prot_bytes;
        size_t req_size;
        u16 lun;
        u8 *target, *lunp, task_attr;
        bool hdr_pi;
        void *req, *cdb;

        mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is setup by calling the
         * VHOST_SCSI_SET_ENDPOINT ioctl.
         */
        vs_tpg = vq->private_data;
        if (!vs_tpg)
                goto out;

        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
                                        ARRAY_SIZE(vq->iov), &out, &in,
                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }

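                /*
                 * Infer the data direction from the descriptor layout: the
                 * first out iovec is always the request header and the first
                 * in iovec the response, so extra in iovecs mean a read
                 * (DMA_FROM_DEVICE) and extra out iovecs a write
                 * (DMA_TO_DEVICE); exactly one of each means no data payload.
                 */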
                /* FIXME: BIDI operation */
                if (out == 1 && in == 1) {
                        data_direction = DMA_NONE;
                        data_first = 0;
                        data_num = 0;
                } else if (out == 1 && in > 1) {
                        data_direction = DMA_FROM_DEVICE;
                        data_first = out + 1;
                        data_num = in - 1;
                } else if (out > 1 && in == 1) {
                        data_direction = DMA_TO_DEVICE;
                        data_first = 1;
                        data_num = out - 1;
                } else {
                        vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
                                        out, in);
                        break;
                }

                /*
                 * Check for a sane resp buffer so we can report errors to
                 * the guest.
                 */
                if (unlikely(vq->iov[out].iov_len !=
                                        sizeof(struct virtio_scsi_cmd_resp))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
                                " bytes\n", vq->iov[out].iov_len);
                        break;
                }

                if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
                        req = &v_req_pi;
                        lunp = &v_req_pi.lun[0];
                        target = &v_req_pi.lun[1];
                        req_size = sizeof(v_req_pi);
                        hdr_pi = true;
                } else {
                        req = &v_req;
                        lunp = &v_req.lun[0];
                        target = &v_req.lun[1];
                        req_size = sizeof(v_req);
                        hdr_pi = false;
                }

                if (unlikely(vq->iov[0].iov_len < req_size)) {
                        pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
                               req_size, vq->iov[0].iov_len);
                        break;
                }
                ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        break;
                }

                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                if (unlikely(*lunp != 1)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                tpg = ACCESS_ONCE(vs_tpg[*target]);

                /* Target does not exist, fail the request */
                if (unlikely(!tpg)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                data_niov = data_num;
                prot_niov = prot_first = prot_bytes = 0;
                /*
                 * Determine if any protection information iovecs are preceding
                 * the actual data payload, and adjust data_first + data_niov
                 * values accordingly for vhost_scsi_map_iov_to_sgl() below.
                 *
                 * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
                 */
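                /*
                 * Note on the LUN decode below: virtio-scsi carries the LUN
                 * as an 8-byte field where byte 0 is 1, byte 1 is the target
                 * id, and bytes 2-3 hold the LUN in SAM flat-space form.
                 * E.g. lun[2] = 0x40, lun[3] = 0x05 gives
                 * ((0x40 << 8) | 0x05) & 0x3FFF == 5; the masked-off 0x40
                 * flag is the flat-space addressing marker.
                 */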
                if (hdr_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
                                        vq_err(vq, "Received non zero do_pi_niov"
                                                ", but wrong data_direction\n");
                                        goto err_cmd;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
                                        vq_err(vq, "Received non zero di_pi_niov"
                                                ", but wrong data_direction\n");
                                        goto err_cmd;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
                        if (prot_bytes) {
                                int tmp = 0;

                                for (i = 0; i < data_num; i++) {
                                        tmp += vq->iov[data_first + i].iov_len;
                                        prot_niov++;
                                        if (tmp >= prot_bytes)
                                                break;
                                }
                                prot_first = data_first;
                                data_first += prot_niov;
                                data_niov = data_num - prot_niov;
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
                        task_attr = v_req_pi.task_attr;
                        cdb = &v_req_pi.cdb[0];
                        lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
                } else {
                        tag = vhost64_to_cpu(vq, v_req.tag);
                        task_attr = v_req.task_attr;
                        cdb = &v_req.cdb[0];
                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                }
                exp_data_len = 0;
                for (i = 0; i < data_niov; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for vhost-scsi
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
                if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
                        goto err_cmd;
                }

                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
                                        PTR_ERR(cmd));
                        goto err_cmd;
                }

                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
                        ": %d\n", cmd, exp_data_len, data_direction);

                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
                cmd->tvc_resp_iov = &vq->iov[out];
                cmd->tvc_in_iovs = in;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                        cmd->tvc_cdb[0], cmd->tvc_lun);

                if (prot_niov) {
                        ret = vhost_scsi_map_iov_to_prot(cmd,
                                        &vq->iov[prot_first], prot_niov,
                                        data_direction == DMA_FROM_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to"
                                        " prot_sgl\n");
                                goto err_free;
                        }
                }
                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(cmd,
                                        &vq->iov[data_first], data_niov,
                                        data_direction == DMA_FROM_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                goto err_free;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
                 * Dispatch tv_cmd descriptor for cmwq execution in process
                 * context provided by tcm_vhost_workqueue.  This also ensures
                 * tv_cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&cmd->work, tcm_vhost_submission_work);
                queue_work(tcm_vhost_workqueue, &cmd->work);
        }

        mutex_unlock(&vq->mutex);
        return;

err_free:
        vhost_scsi_free_cmd(cmd);
err_cmd:
        vhost_scsi_send_bad_target(vs, vq, head, out);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void
tcm_vhost_send_evt(struct vhost_scsi *vs,
                   struct tcm_vhost_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
{
        struct tcm_vhost_evt *evt;

        evt = tcm_vhost_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
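                /*
                 * This mirrors the decode in vhost_scsi_handle_vq(): e.g.
                 * unpacked_lun 261 (0x105) encodes as lun[2] = 0x41,
                 * lun[3] = 0x05, with the 0x40 flat-space flag set only
                 * when the LUN does not fit in a single byte.
                 */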
                evt->event.lun[0] = 0x01;
                evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
        }

        llist_add(&evt->list, &vs->vs_event_list);
        vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        mutex_lock(&vq->mutex);
        if (!vq->private_data)
                goto out;

        if (vs->vs_events_missed)
                tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
        vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
        int i;

        /* Init new inflight and remember the old inflight */
        tcm_vhost_init_inflight(vs, old_inflight);

        /*
         * The inflight->kref was initialized to 1. We decrement it here to
         * indicate the start of the flush operation so that it will reach 0
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
        vhost_work_flush(&vs->dev, &vs->vs_event_work);

        /* Wait for all reqs issued before the flush to be finished */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 *  The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
{
        struct se_portal_group *se_tpg;
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;

        mutex_lock(&tcm_vhost_mutex);
        mutex_lock(&vs->dev.mutex);

        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
1338                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1339                         ret = -EFAULT;
1340                         goto out;
1341                 }
1342         }
1343
1344         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1345         vs_tpg = kzalloc(len, GFP_KERNEL);
1346         if (!vs_tpg) {
1347                 ret = -ENOMEM;
1348                 goto out;
1349         }
1350         if (vs->vs_tpg)
1351                 memcpy(vs_tpg, vs->vs_tpg, len);
1352
1353         list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
1354                 mutex_lock(&tpg->tv_tpg_mutex);
1355                 if (!tpg->tpg_nexus) {
1356                         mutex_unlock(&tpg->tv_tpg_mutex);
1357                         continue;
1358                 }
1359                 if (tpg->tv_tpg_vhost_count != 0) {
1360                         mutex_unlock(&tpg->tv_tpg_mutex);
1361                         continue;
1362                 }
1363                 tv_tport = tpg->tport;
1364
1365                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1366                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1367                                 kfree(vs_tpg);
1368                                 mutex_unlock(&tpg->tv_tpg_mutex);
1369                                 ret = -EEXIST;
1370                                 goto out;
1371                         }
1372                         /*
1373                          * In order to ensure individual vhost-scsi configfs
1374                          * groups cannot be removed while in use by vhost ioctl,
1375                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1376                          * dependency now.
1377                          */
1378                         se_tpg = &tpg->se_tpg;
1379                         ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
1380                                                    &se_tpg->tpg_group.cg_item);
1381                         if (ret) {
1382                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1383                                 kfree(vs_tpg);
1384                                 mutex_unlock(&tpg->tv_tpg_mutex);
1385                                 goto out;
1386                         }
1387                         tpg->tv_tpg_vhost_count++;
1388                         tpg->vhost_scsi = vs;
1389                         vs_tpg[tpg->tport_tpgt] = tpg;
1390                         smp_mb__after_atomic();
1391                         match = true;
1392                 }
1393                 mutex_unlock(&tpg->tv_tpg_mutex);
1394         }
1395
1396         if (match) {
1397                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1398                        sizeof(vs->vs_vhost_wwpn));
1399                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1400                         vq = &vs->vqs[i].vq;
1401                         mutex_lock(&vq->mutex);
1402                         vq->private_data = vs_tpg;
1403                         vhost_init_used(vq);
1404                         mutex_unlock(&vq->mutex);
1405                 }
1406                 ret = 0;
1407         } else {
1408                 ret = -EEXIST;
1409         }
1410
1411         /*
1412          * Act as synchronize_rcu to make sure all in-flight access to
1413          * the old vs->vs_tpg has finished.
1414          */
1415         vhost_scsi_flush(vs);
1416         kfree(vs->vs_tpg);
1417         vs->vs_tpg = vs_tpg;
1418
1419 out:
1420         mutex_unlock(&vs->dev.mutex);
1421         mutex_unlock(&tcm_vhost_mutex);
1422         return ret;
1423 }
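/*
 * Illustrative userspace sketch (not part of this driver): a VMM such
 * as QEMU binds a configfs-created target to an open /dev/vhost-scsi
 * fd roughly as below. The WWPN is an example value and must name an
 * existing vhost fabric tport; assumes <fcntl.h>, <string.h>,
 * <sys/ioctl.h> and <linux/vhost.h>.
 *
 *	struct vhost_scsi_target backend = { 0 };
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *
 *	strncpy(backend.vhost_wwpn, "naa.600140554cf3a18e",
 *		sizeof(backend.vhost_wwpn) - 1);
 *	if (ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &backend) < 0)
 *		perror("VHOST_SCSI_SET_ENDPOINT");
 */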
1424
1425 static int
1426 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1427                           struct vhost_scsi_target *t)
1428 {
1429         struct se_portal_group *se_tpg;
1430         struct tcm_vhost_tport *tv_tport;
1431         struct tcm_vhost_tpg *tpg;
1432         struct vhost_virtqueue *vq;
1433         bool match = false;
1434         int index, ret, i;
1435         u8 target;
1436
1437         mutex_lock(&tcm_vhost_mutex);
1438         mutex_lock(&vs->dev.mutex);
1439         /* Verify that the ring has been set up correctly. */
1440         for (index = 0; index < vs->dev.nvqs; ++index) {
1441                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1442                         ret = -EFAULT;
1443                         goto err_dev;
1444                 }
1445         }
1446
1447         if (!vs->vs_tpg) {
1448                 ret = 0;
1449                 goto err_dev;
1450         }
1451
1452         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1453                 target = i;
1454                 tpg = vs->vs_tpg[target];
1455                 if (!tpg)
1456                         continue;
1457
1458                 mutex_lock(&tpg->tv_tpg_mutex);
1459                 tv_tport = tpg->tport;
1460                 if (!tv_tport) {
1461                         ret = -ENODEV;
1462                         goto err_tpg;
1463                 }
1464
1465                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1466                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1467                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1468                                 tv_tport->tport_name, tpg->tport_tpgt,
1469                                 t->vhost_wwpn, t->vhost_tpgt);
1470                         ret = -EINVAL;
1471                         goto err_tpg;
1472                 }
1473                 tpg->tv_tpg_vhost_count--;
1474                 tpg->vhost_scsi = NULL;
1475                 vs->vs_tpg[target] = NULL;
1476                 match = true;
1477                 mutex_unlock(&tpg->tv_tpg_mutex);
1478                 /*
1479                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1480                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1481                  */
1482                 se_tpg = &tpg->se_tpg;
1483                 configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
1484                                        &se_tpg->tpg_group.cg_item);
1485         }
1486         if (match) {
1487                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1488                         vq = &vs->vqs[i].vq;
1489                         mutex_lock(&vq->mutex);
1490                         vq->private_data = NULL;
1491                         mutex_unlock(&vq->mutex);
1492                 }
1493         }
1494         /*
1495          * Act as synchronize_rcu to make sure all in-flight access to
1496          * the old vs->vs_tpg has finished.
1497          */
1498         vhost_scsi_flush(vs);
1499         kfree(vs->vs_tpg);
1500         vs->vs_tpg = NULL;
1501         WARN_ON(vs->vs_events_nr);
1502         mutex_unlock(&vs->dev.mutex);
1503         mutex_unlock(&tcm_vhost_mutex);
1504         return 0;
1505
1506 err_tpg:
1507         mutex_unlock(&tpg->tv_tpg_mutex);
1508 err_dev:
1509         mutex_unlock(&vs->dev.mutex);
1510         mutex_unlock(&tcm_vhost_mutex);
1511         return ret;
1512 }
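/*
 * Illustrative teardown, mirroring the SET_ENDPOINT sketch above: the
 * same struct vhost_scsi_target is passed back in, which releases the
 * configfs dependency taken in vhost_scsi_set_endpoint():
 *
 *	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);
 */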
1513
1514 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1515 {
1516         struct vhost_virtqueue *vq;
1517         int i;
1518
1519         if (features & ~VHOST_SCSI_FEATURES)
1520                 return -EOPNOTSUPP;
1521
1522         mutex_lock(&vs->dev.mutex);
1523         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1524             !vhost_log_access_ok(&vs->dev)) {
1525                 mutex_unlock(&vs->dev.mutex);
1526                 return -EFAULT;
1527         }
1528
1529         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1530                 vq = &vs->vqs[i].vq;
1531                 mutex_lock(&vq->mutex);
1532                 vq->acked_features = features;
1533                 mutex_unlock(&vq->mutex);
1534         }
1535         mutex_unlock(&vs->dev.mutex);
1536         return 0;
1537 }
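/*
 * Illustrative userspace feature negotiation (assumes <sys/ioctl.h>
 * and <linux/vhost.h>); bits outside VHOST_SCSI_FEATURES are rejected
 * with -EOPNOTSUPP by the handler above. Clearing VHOST_F_LOG_ALL is
 * just an example:
 *
 *	__u64 features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	features &= ~(1ULL << VHOST_F_LOG_ALL);
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 */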
1538
1539 static int vhost_scsi_open(struct inode *inode, struct file *f)
1540 {
1541         struct vhost_scsi *vs;
1542         struct vhost_virtqueue **vqs;
1543         int r = -ENOMEM, i;
1544
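        /*
         * struct vhost_scsi embeds all of its virtqueues, so the
         * allocation can be too large to satisfy with contiguous
         * pages under memory pressure: try kmalloc quietly first
         * (__GFP_NOWARN, __GFP_REPEAT), then fall back to vzalloc().
         */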
1545         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1546         if (!vs) {
1547                 vs = vzalloc(sizeof(*vs));
1548                 if (!vs)
1549                         goto err_vs;
1550         }
1551
1552         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1553         if (!vqs)
1554                 goto err_vqs;
1555
1556         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1557         vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1558
1559         vs->vs_events_nr = 0;
1560         vs->vs_events_missed = false;
1561
1562         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1563         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1564         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1565         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1566         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1567                 vqs[i] = &vs->vqs[i].vq;
1568                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1569         }
1570         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1571
1572         tcm_vhost_init_inflight(vs, NULL);
1573
1574         f->private_data = vs;
1575         return 0;
1576
1577 err_vqs:
1578         kvfree(vs);
1579 err_vs:
1580         return r;
1581 }
1582
1583 static int vhost_scsi_release(struct inode *inode, struct file *f)
1584 {
1585         struct vhost_scsi *vs = f->private_data;
1586         struct vhost_scsi_target t;
1587
1588         mutex_lock(&vs->dev.mutex);
1589         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1590         mutex_unlock(&vs->dev.mutex);
1591         vhost_scsi_clear_endpoint(vs, &t);
1592         vhost_dev_stop(&vs->dev);
1593         vhost_dev_cleanup(&vs->dev, false);
1594         /* Jobs can re-queue themselves in the evt kick handler. Do an extra flush. */
1595         vhost_scsi_flush(vs);
1596         kfree(vs->dev.vqs);
1597         kvfree(vs);
1598         return 0;
1599 }
1600
1601 static long
1602 vhost_scsi_ioctl(struct file *f,
1603                  unsigned int ioctl,
1604                  unsigned long arg)
1605 {
1606         struct vhost_scsi *vs = f->private_data;
1607         struct vhost_scsi_target backend;
1608         void __user *argp = (void __user *)arg;
1609         u64 __user *featurep = argp;
1610         u32 __user *eventsp = argp;
1611         u32 events_missed;
1612         u64 features;
1613         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1614         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1615
1616         switch (ioctl) {
1617         case VHOST_SCSI_SET_ENDPOINT:
1618                 if (copy_from_user(&backend, argp, sizeof backend))
1619                         return -EFAULT;
1620                 if (backend.reserved != 0)
1621                         return -EOPNOTSUPP;
1622
1623                 return vhost_scsi_set_endpoint(vs, &backend);
1624         case VHOST_SCSI_CLEAR_ENDPOINT:
1625                 if (copy_from_user(&backend, argp, sizeof backend))
1626                         return -EFAULT;
1627                 if (backend.reserved != 0)
1628                         return -EOPNOTSUPP;
1629
1630                 return vhost_scsi_clear_endpoint(vs, &backend);
1631         case VHOST_SCSI_GET_ABI_VERSION:
1632                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1633                         return -EFAULT;
1634                 return 0;
1635         case VHOST_SCSI_SET_EVENTS_MISSED:
1636                 if (get_user(events_missed, eventsp))
1637                         return -EFAULT;
1638                 mutex_lock(&vq->mutex);
1639                 vs->vs_events_missed = events_missed;
1640                 mutex_unlock(&vq->mutex);
1641                 return 0;
1642         case VHOST_SCSI_GET_EVENTS_MISSED:
1643                 mutex_lock(&vq->mutex);
1644                 events_missed = vs->vs_events_missed;
1645                 mutex_unlock(&vq->mutex);
1646                 if (put_user(events_missed, eventsp))
1647                         return -EFAULT;
1648                 return 0;
1649         case VHOST_GET_FEATURES:
1650                 features = VHOST_SCSI_FEATURES;
1651                 if (copy_to_user(featurep, &features, sizeof features))
1652                         return -EFAULT;
1653                 return 0;
1654         case VHOST_SET_FEATURES:
1655                 if (copy_from_user(&features, featurep, sizeof features))
1656                         return -EFAULT;
1657                 return vhost_scsi_set_features(vs, features);
1658         default:
1659                 mutex_lock(&vs->dev.mutex);
1660                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1661                 /* TODO: flush backend after dev ioctl. */
1662                 if (r == -ENOIOCTLCMD)
1663                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1664                 mutex_unlock(&vs->dev.mutex);
1665                 return r;
1666         }
1667 }
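/*
 * Illustrative: before issuing the endpoint ioctls above, userspace
 * can probe which ABI it is talking to:
 *
 *	int abi = 0;
 *
 *	ioctl(vhost_fd, VHOST_SCSI_GET_ABI_VERSION, &abi);
 *
 * and bail out if abi differs from the VHOST_SCSI_ABI_VERSION it was
 * built against.
 */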
1668
1669 #ifdef CONFIG_COMPAT
1670 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1671                                 unsigned long arg)
1672 {
1673         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1674 }
1675 #endif
1676
1677 static const struct file_operations vhost_scsi_fops = {
1678         .owner          = THIS_MODULE,
1679         .release        = vhost_scsi_release,
1680         .unlocked_ioctl = vhost_scsi_ioctl,
1681 #ifdef CONFIG_COMPAT
1682         .compat_ioctl   = vhost_scsi_compat_ioctl,
1683 #endif
1684         .open           = vhost_scsi_open,
1685         .llseek         = noop_llseek,
1686 };
1687
1688 static struct miscdevice vhost_scsi_misc = {
1689         MISC_DYNAMIC_MINOR,
1690         "vhost-scsi",
1691         &vhost_scsi_fops,
1692 };
1693
1694 static int __init vhost_scsi_register(void)
1695 {
1696         return misc_register(&vhost_scsi_misc);
1697 }
1698
1699 static int vhost_scsi_deregister(void)
1700 {
1701         return misc_deregister(&vhost_scsi_misc);
1702 }
1703
1704 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1705 {
1706         switch (tport->tport_proto_id) {
1707         case SCSI_PROTOCOL_SAS:
1708                 return "SAS";
1709         case SCSI_PROTOCOL_FCP:
1710                 return "FCP";
1711         case SCSI_PROTOCOL_ISCSI:
1712                 return "iSCSI";
1713         default:
1714                 break;
1715         }
1716
1717         return "Unknown";
1718 }
1719
1720 static void
1721 tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1722                   struct se_lun *lun, bool plug)
1723 {
1725         struct vhost_scsi *vs = tpg->vhost_scsi;
1726         struct vhost_virtqueue *vq;
1727         u32 reason;
1728
1729         if (!vs)
1730                 return;
1731
1732         mutex_lock(&vs->dev.mutex);
1733
1734         if (plug)
1735                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1736         else
1737                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1738
1739         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1740         mutex_lock(&vq->mutex);
1741         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1742                 tcm_vhost_send_evt(vs, tpg, lun,
1743                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1744         mutex_unlock(&vq->mutex);
1745         mutex_unlock(&vs->dev.mutex);
1746 }
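/*
 * The two reason codes above map onto virtio-scsi transport reset
 * events: VIRTIO_SCSI_EVT_RESET_RESCAN asks the guest to scan for a
 * newly attached LUN, while VIRTIO_SCSI_EVT_RESET_REMOVED reports
 * that a LUN has gone away. A guest that did not negotiate
 * VIRTIO_SCSI_F_HOTPLUG sees neither event.
 */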
1747
1748 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1749 {
1750         tcm_vhost_do_plug(tpg, lun, true);
1751 }
1752
1753 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1754 {
1755         tcm_vhost_do_plug(tpg, lun, false);
1756 }
1757
1758 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1759                                struct se_lun *lun)
1760 {
1761         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1762                                 struct tcm_vhost_tpg, se_tpg);
1763
1764         mutex_lock(&tcm_vhost_mutex);
1765
1766         mutex_lock(&tpg->tv_tpg_mutex);
1767         tpg->tv_tpg_port_count++;
1768         mutex_unlock(&tpg->tv_tpg_mutex);
1769
1770         tcm_vhost_hotplug(tpg, lun);
1771
1772         mutex_unlock(&tcm_vhost_mutex);
1773
1774         return 0;
1775 }
1776
1777 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1778                                   struct se_lun *lun)
1779 {
1780         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1781                                 struct tcm_vhost_tpg, se_tpg);
1782
1783         mutex_lock(&tcm_vhost_mutex);
1784
1785         mutex_lock(&tpg->tv_tpg_mutex);
1786         tpg->tv_tpg_port_count--;
1787         mutex_unlock(&tpg->tv_tpg_mutex);
1788
1789         tcm_vhost_hotunplug(tpg, lun);
1790
1791         mutex_unlock(&tcm_vhost_mutex);
1792 }
1793
1794 static struct se_node_acl *
1795 tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1796                        struct config_group *group,
1797                        const char *name)
1798 {
1799         struct se_node_acl *se_nacl, *se_nacl_new;
1800         struct tcm_vhost_nacl *nacl;
1801         u64 wwpn = 0;
1802         u32 nexus_depth;
1803
1804         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1805                 return ERR_PTR(-EINVAL); */
1806         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1807         if (!se_nacl_new)
1808                 return ERR_PTR(-ENOMEM);
1809
1810         nexus_depth = 1;
1811         /*
1812          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1813          * when converting a NodeACL from demo mode -> explict
1814          * when converting a NodeACL from demo mode -> explicit
1815         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1816                                 name, nexus_depth);
1817         if (IS_ERR(se_nacl)) {
1818                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1819                 return se_nacl;
1820         }
1821         /*
1822          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1823          */
1824         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1825         nacl->iport_wwpn = wwpn;
1826
1827         return se_nacl;
1828 }
1829
1830 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1831 {
1832         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1833                                 struct tcm_vhost_nacl, se_node_acl);
1834         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1835         kfree(nacl);
1836 }
1837
1838 static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1839                                        struct se_session *se_sess)
1840 {
1841         struct tcm_vhost_cmd *tv_cmd;
1842         unsigned int i;
1843
1844         if (!se_sess->sess_cmd_map)
1845                 return;
1846
1847         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1848                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1849
1850                 kfree(tv_cmd->tvc_sgl);
1851                 kfree(tv_cmd->tvc_prot_sgl);
1852                 kfree(tv_cmd->tvc_upages);
1853         }
1854 }
1855
1856 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1857                                 const char *name)
1858 {
1859         struct se_portal_group *se_tpg;
1860         struct se_session *se_sess;
1861         struct tcm_vhost_nexus *tv_nexus;
1862         struct tcm_vhost_cmd *tv_cmd;
1863         unsigned int i;
1864
1865         mutex_lock(&tpg->tv_tpg_mutex);
1866         if (tpg->tpg_nexus) {
1867                 mutex_unlock(&tpg->tv_tpg_mutex);
1868                 pr_debug("tpg->tpg_nexus already exists\n");
1869                 return -EEXIST;
1870         }
1871         se_tpg = &tpg->se_tpg;
1872
1873         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1874         if (!tv_nexus) {
1875                 mutex_unlock(&tpg->tv_tpg_mutex);
1876                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1877                 return -ENOMEM;
1878         }
1879         /*
1880          * Initialize the struct se_session pointer and set up the tag pool
1881          * for struct tcm_vhost_cmd descriptors.
1882          */
1883         tv_nexus->tvn_se_sess = transport_init_session_tags(
1884                                         TCM_VHOST_DEFAULT_TAGS,
1885                                         sizeof(struct tcm_vhost_cmd),
1886                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1887         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1888                 mutex_unlock(&tpg->tv_tpg_mutex);
1889                 kfree(tv_nexus);
1890                 return -ENOMEM;
1891         }
1892         se_sess = tv_nexus->tvn_se_sess;
1893         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1894                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1895
1896                 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1897                                         TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
1898                 if (!tv_cmd->tvc_sgl) {
1899                         mutex_unlock(&tpg->tv_tpg_mutex);
1900                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1901                         goto out;
1902                 }
1903
1904                 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1905                                         TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
1906                 if (!tv_cmd->tvc_upages) {
1907                         mutex_unlock(&tpg->tv_tpg_mutex);
1908                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1909                         goto out;
1910                 }
1911
1912                 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1913                                         TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
1914                 if (!tv_cmd->tvc_prot_sgl) {
1915                         mutex_unlock(&tpg->tv_tpg_mutex);
1916                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1917                         goto out;
1918                 }
1919         }
1920         /*
1921          * Since we are running in 'demo mode' this call will generate a
1922          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1923          * the SCSI Initiator port name of the passed configfs group 'name'.
1924          */
1925         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1926                                 se_tpg, (unsigned char *)name);
1927         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1928                 mutex_unlock(&tpg->tv_tpg_mutex);
1929                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1930                                 " for %s\n", name);
1931                 goto out;
1932         }
1933         /*
1934          * Now register the TCM vhost virtual I_T Nexus as active with the
1935          * call to __transport_register_session()
1936          */
1937         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1938                         tv_nexus->tvn_se_sess, tv_nexus);
1939         tpg->tpg_nexus = tv_nexus;
1940
1941         mutex_unlock(&tpg->tv_tpg_mutex);
1942         return 0;
1943
1944 out:
1945         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1946         transport_free_session(se_sess);
1947         kfree(tv_nexus);
1948         return -ENOMEM;
1949 }
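/*
 * Sizing note (illustrative, assuming a 64-bit build without
 * CONFIG_DEBUG_SG, where sizeof(struct scatterlist) is 32 bytes):
 * the per-command preallocation above pins roughly
 *
 *	TCM_VHOST_DEFAULT_TAGS * TCM_VHOST_PREALLOC_SGLS * 32 bytes
 *	= 256 * 2048 * 32 = 16 MiB
 *
 * of data SGLs per nexus, in addition to the protection SGLs and the
 * TCM_VHOST_PREALLOC_UPAGES page-pointer arrays.
 */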
1950
1951 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1952 {
1953         struct se_session *se_sess;
1954         struct tcm_vhost_nexus *tv_nexus;
1955
1956         mutex_lock(&tpg->tv_tpg_mutex);
1957         tv_nexus = tpg->tpg_nexus;
1958         if (!tv_nexus) {
1959                 mutex_unlock(&tpg->tv_tpg_mutex);
1960                 return -ENODEV;
1961         }
1962
1963         se_sess = tv_nexus->tvn_se_sess;
1964         if (!se_sess) {
1965                 mutex_unlock(&tpg->tv_tpg_mutex);
1966                 return -ENODEV;
1967         }
1968
1969         if (tpg->tv_tpg_port_count != 0) {
1970                 mutex_unlock(&tpg->tv_tpg_mutex);
1971                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1972                         " active TPG port count: %d\n",
1973                         tpg->tv_tpg_port_count);
1974                 return -EBUSY;
1975         }
1976
1977         if (tpg->tv_tpg_vhost_count != 0) {
1978                 mutex_unlock(&tpg->tv_tpg_mutex);
1979                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1980                         " active TPG vhost count: %d\n",
1981                         tpg->tv_tpg_vhost_count);
1982                 return -EBUSY;
1983         }
1984
1985         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1986                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1987                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1988
1989         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1990         /*
1991          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1992          */
1993         transport_deregister_session(tv_nexus->tvn_se_sess);
1994         tpg->tpg_nexus = NULL;
1995         mutex_unlock(&tpg->tv_tpg_mutex);
1996
1997         kfree(tv_nexus);
1998         return 0;
1999 }
2000
2001 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
2002                                         char *page)
2003 {
2004         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2005                                 struct tcm_vhost_tpg, se_tpg);
2006         struct tcm_vhost_nexus *tv_nexus;
2007         ssize_t ret;
2008
2009         mutex_lock(&tpg->tv_tpg_mutex);
2010         tv_nexus = tpg->tpg_nexus;
2011         if (!tv_nexus) {
2012                 mutex_unlock(&tpg->tv_tpg_mutex);
2013                 return -ENODEV;
2014         }
2015         ret = snprintf(page, PAGE_SIZE, "%s\n",
2016                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2017         mutex_unlock(&tpg->tv_tpg_mutex);
2018
2019         return ret;
2020 }
2021
2022 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
2023                                          const char *page,
2024                                          size_t count)
2025 {
2026         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2027                                 struct tcm_vhost_tpg, se_tpg);
2028         struct tcm_vhost_tport *tport_wwn = tpg->tport;
2029         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
2030         int ret;
2031         /*
2032          * Shut down the active I_T nexus if 'NULL' is passed.
2033          */
2034         if (!strncmp(page, "NULL", 4)) {
2035                 ret = tcm_vhost_drop_nexus(tpg);
2036                 return (!ret) ? count : ret;
2037         }
2038         /*
2039          * Otherwise make sure the passed virtual Initiator port WWN matches
2040          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
2041          * tcm_vhost_make_nexus().
2042          */
2043         if (strlen(page) >= TCM_VHOST_NAMELEN) {
2044                 pr_err("Emulated NAA SAS Address: %s, exceeds"
2045                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
2046                 return -EINVAL;
2047         }
2048         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
2049
2050         ptr = strstr(i_port, "naa.");
2051         if (ptr) {
2052                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2053                         pr_err("Passed SAS Initiator Port %s does not"
2054                                 " match target port protoid: %s\n", i_port,
2055                                 tcm_vhost_dump_proto_id(tport_wwn));
2056                         return -EINVAL;
2057                 }
2058                 port_ptr = &i_port[0];
2059                 goto check_newline;
2060         }
2061         ptr = strstr(i_port, "fc.");
2062         if (ptr) {
2063                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2064                         pr_err("Passed FCP Initiator Port %s does not"
2065                                 " match target port protoid: %s\n", i_port,
2066                                 tcm_vhost_dump_proto_id(tport_wwn));
2067                         return -EINVAL;
2068                 }
2069                 port_ptr = &i_port[3]; /* Skip over "fc." */
2070                 goto check_newline;
2071         }
2072         ptr = strstr(i_port, "iqn.");
2073         if (ptr) {
2074                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2075                         pr_err("Passed iSCSI Initiator Port %s does not"
2076                                 " match target port protoid: %s\n", i_port,
2077                                 tcm_vhost_dump_proto_id(tport_wwn));
2078                         return -EINVAL;
2079                 }
2080                 port_ptr = &i_port[0];
2081                 goto check_newline;
2082         }
2083         pr_err("Unable to locate prefix for emulated Initiator Port:"
2084                         " %s\n", i_port);
2085         return -EINVAL;
2086         /*
2087          * Clear any trailing newline for the NAA WWN
2088          */
2089 check_newline:
2090         if (i_port[strlen(i_port)-1] == '\n')
2091                 i_port[strlen(i_port)-1] = '\0';
2092
2093         ret = tcm_vhost_make_nexus(tpg, port_ptr);
2094         if (ret < 0)
2095                 return ret;
2096
2097         return count;
2098 }
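/*
 * Illustrative shell usage of the nexus attribute above (assumes
 * configfs mounted at /sys/kernel/config and an existing WWPN/TPG):
 *
 *	echo -n naa.600140554cf3a18e > \
 *		/sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *
 * Writing the literal string "NULL" drops the active I_T nexus instead.
 */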
2099
2100 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
2101
2102 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
2103         &tcm_vhost_tpg_nexus.attr,
2104         NULL,
2105 };
2106
2107 static struct se_portal_group *
2108 tcm_vhost_make_tpg(struct se_wwn *wwn,
2109                    struct config_group *group,
2110                    const char *name)
2111 {
2112         struct tcm_vhost_tport *tport = container_of(wwn,
2113                         struct tcm_vhost_tport, tport_wwn);
2114
2115         struct tcm_vhost_tpg *tpg;
2116         unsigned long tpgt;
2117         int ret;
2118
2119         if (strstr(name, "tpgt_") != name)
2120                 return ERR_PTR(-EINVAL);
2121         if (kstrtoul(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2122                 return ERR_PTR(-EINVAL);
2123
2124         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
2125         if (!tpg) {
2126                 pr_err("Unable to allocate struct tcm_vhost_tpg\n");
2127                 return ERR_PTR(-ENOMEM);
2128         }
2129         mutex_init(&tpg->tv_tpg_mutex);
2130         INIT_LIST_HEAD(&tpg->tv_tpg_list);
2131         tpg->tport = tport;
2132         tpg->tport_tpgt = tpgt;
2133
2134         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
2135                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
2136         if (ret < 0) {
2137                 kfree(tpg);
2138                 return ERR_PTR(ret);
2139         }
2140         mutex_lock(&tcm_vhost_mutex);
2141         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
2142         mutex_unlock(&tcm_vhost_mutex);
2143
2144         return &tpg->se_tpg;
2145 }
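/*
 * tcm_vhost_make_tpg() above backs mkdir(2) of a "tpgt_<n>" directory
 * beneath a vhost WWN group, e.g. (illustrative path, assuming the
 * standard configfs mount point):
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 */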
2146
2147 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
2148 {
2149         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2150                                 struct tcm_vhost_tpg, se_tpg);
2151
2152         mutex_lock(&tcm_vhost_mutex);
2153         list_del(&tpg->tv_tpg_list);
2154         mutex_unlock(&tcm_vhost_mutex);
2155         /*
2156          * Release the virtual I_T Nexus for this vhost TPG
2157          */
2158         tcm_vhost_drop_nexus(tpg);
2159         /*
2160          * Deregister the se_tpg from TCM.
2161          */
2162         core_tpg_deregister(se_tpg);
2163         kfree(tpg);
2164 }
2165
2166 static struct se_wwn *
2167 tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2168                      struct config_group *group,
2169                      const char *name)
2170 {
2171         struct tcm_vhost_tport *tport;
2172         char *ptr;
2173         u64 wwpn = 0;
2174         int off = 0;
2175
2176         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
2177                 return ERR_PTR(-EINVAL); */
2178
2179         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
2180         if (!tport) {
2181                 pr_err("Unable to allocate struct tcm_vhost_tport\n");
2182                 return ERR_PTR(-ENOMEM);
2183         }
2184         tport->tport_wwpn = wwpn;
2185         /*
2186          * Determine the emulated Protocol Identifier and Target Port Name
2187          * based on the incoming configfs directory name.
2188          */
2189         ptr = strstr(name, "naa.");
2190         if (ptr) {
2191                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2192                 goto check_len;
2193         }
2194         ptr = strstr(name, "fc.");
2195         if (ptr) {
2196                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2197                 off = 3; /* Skip over "fc." */
2198                 goto check_len;
2199         }
2200         ptr = strstr(name, "iqn.");
2201         if (ptr) {
2202                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2203                 goto check_len;
2204         }
2205
2206         pr_err("Unable to locate prefix for emulated Target Port:"
2207                         " %s\n", name);
2208         kfree(tport);
2209         return ERR_PTR(-EINVAL);
2210
2211 check_len:
2212         if (strlen(name) >= TCM_VHOST_NAMELEN) {
2213                 pr_err("Emulated %s Address: %s, exceeds"
2214                         " max: %d\n", tcm_vhost_dump_proto_id(tport), name,
2215                         TCM_VHOST_NAMELEN);
2216                 kfree(tport);
2217                 return ERR_PTR(-EINVAL);
2218         }
2219         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
2220
2221         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2222                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
2223
2224         return &tport->tport_wwn;
2225 }
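/*
 * tcm_vhost_make_tport() above backs mkdir(2) of the WWN directory
 * itself, e.g. (illustrative):
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e
 *
 * where the "naa.", "fc." or "iqn." prefix selects the emulated
 * protocol identifier parsed above.
 */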
2226
2227 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
2228 {
2229         struct tcm_vhost_tport *tport = container_of(wwn,
2230                                 struct tcm_vhost_tport, tport_wwn);
2231
2232         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2233                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2234                 tport->tport_name);
2235
2236         kfree(tport);
2237 }
2238
2239 static ssize_t
2240 tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2241                                 char *page)
2242 {
2243         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2244                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2245                 utsname()->machine);
2246 }
2247
2248 TF_WWN_ATTR_RO(tcm_vhost, version);
2249
2250 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2251         &tcm_vhost_wwn_version.attr,
2252         NULL,
2253 };
2254
2255 static struct target_core_fabric_ops tcm_vhost_ops = {
2256         .get_fabric_name                = tcm_vhost_get_fabric_name,
2257         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
2258         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
2259         .tpg_get_tag                    = tcm_vhost_get_tag,
2260         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2261         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2262         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2263         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2264         .tpg_check_demo_mode            = tcm_vhost_check_true,
2265         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2266         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2267         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2268         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2269         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2270         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2271         .release_cmd                    = tcm_vhost_release_cmd,
2272         .check_stop_free                = vhost_scsi_check_stop_free,
2273         .shutdown_session               = tcm_vhost_shutdown_session,
2274         .close_session                  = tcm_vhost_close_session,
2275         .sess_get_index                 = tcm_vhost_sess_get_index,
2276         .sess_get_initiator_sid         = NULL,
2277         .write_pending                  = tcm_vhost_write_pending,
2278         .write_pending_status           = tcm_vhost_write_pending_status,
2279         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2280         .get_task_tag                   = tcm_vhost_get_task_tag,
2281         .get_cmd_state                  = tcm_vhost_get_cmd_state,
2282         .queue_data_in                  = tcm_vhost_queue_data_in,
2283         .queue_status                   = tcm_vhost_queue_status,
2284         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2285         .aborted_task                   = tcm_vhost_aborted_task,
2286         /*
2287          * Set up callbacks for the generic logic in target_core_fabric_configfs.c
2288          */
2289         .fabric_make_wwn                = tcm_vhost_make_tport,
2290         .fabric_drop_wwn                = tcm_vhost_drop_tport,
2291         .fabric_make_tpg                = tcm_vhost_make_tpg,
2292         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2293         .fabric_post_link               = tcm_vhost_port_link,
2294         .fabric_pre_unlink              = tcm_vhost_port_unlink,
2295         .fabric_make_np                 = NULL,
2296         .fabric_drop_np                 = NULL,
2297         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2298         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2299 };
2300
2301 static int tcm_vhost_register_configfs(void)
2302 {
2303         struct target_fabric_configfs *fabric;
2304         int ret;
2305
2306         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2307                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2308                 utsname()->machine);
2309         /*
2310          * Register the top level struct config_item_type with TCM core
2311          */
2312         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2313         if (IS_ERR(fabric)) {
2314                 pr_err("target_fabric_configfs_init() failed\n");
2315                 return PTR_ERR(fabric);
2316         }
2317         /*
2318          * Setup fabric->tf_ops from our local tcm_vhost_ops
2319          */
2320         fabric->tf_ops = tcm_vhost_ops;
2321         /*
2322          * Setup default attribute lists for various fabric->tf_cit_tmpl
2323          */
2324         fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2325         fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2326         fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2327         fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2328         fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2329         fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2330         fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2331         fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2332         fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2333         /*
2334          * Register the fabric for use within TCM
2335          */
2336         ret = target_fabric_configfs_register(fabric);
2337         if (ret < 0) {
2338                 pr_err("target_fabric_configfs_register() failed"
2339                                 " for TCM_VHOST\n");
2340                 return ret;
2341         }
2342         /*
2343          * Setup our local pointer to *fabric
2344          */
2345         tcm_vhost_fabric_configfs = fabric;
2346         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2347         return 0;
2348 }
2349
2350 static void tcm_vhost_deregister_configfs(void)
2351 {
2352         if (!tcm_vhost_fabric_configfs)
2353                 return;
2354
2355         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2356         tcm_vhost_fabric_configfs = NULL;
2357         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2358 }
2359
2360 static int __init tcm_vhost_init(void)
2361 {
2362         int ret = -ENOMEM;
2363         /*
2364          * Use our own dedicated workqueue for submitting I/O into
2365          * target core to avoid contention within system_wq.
2366          */
2367         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2368         if (!tcm_vhost_workqueue)
2369                 goto out;
2370
2371         ret = vhost_scsi_register();
2372         if (ret < 0)
2373                 goto out_destroy_workqueue;
2374
2375         ret = tcm_vhost_register_configfs();
2376         if (ret < 0)
2377                 goto out_vhost_scsi_deregister;
2378
2379         return 0;
2380
2381 out_vhost_scsi_deregister:
2382         vhost_scsi_deregister();
2383 out_destroy_workqueue:
2384         destroy_workqueue(tcm_vhost_workqueue);
2385 out:
2386         return ret;
2387 }
2388
2389 static void tcm_vhost_exit(void)
2390 {
2391         tcm_vhost_deregister_configfs();
2392         vhost_scsi_deregister();
2393         destroy_workqueue(tcm_vhost_workqueue);
2394 }
2395
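/*
 * Illustrative module lifecycle: once loaded, the misc device appears
 * as /dev/vhost-scsi and the fabric under
 * /sys/kernel/config/target/vhost/ (given configfs and target_core_mod
 * are available); tcm_vhost_exit() unwinds in the reverse order of
 * tcm_vhost_init().
 */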
2396 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2397 MODULE_ALIAS("tcm_vhost");
2398 MODULE_LICENSE("GPL");
2399 module_init(tcm_vhost_init);
2400 module_exit(tcm_vhost_exit);