vhost/scsi: Add ANY_LAYOUT support in vhost_scsi_handle_vq
[firefly-linux-kernel-4.4.55.git] drivers/vhost/scsi.c
1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_tcq.h>
42 #include <target/target_core_base.h>
43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_configfs.h>
45 #include <target/target_core_configfs.h>
46 #include <target/configfs_macros.h>
47 #include <linux/vhost.h>
48 #include <linux/virtio_scsi.h>
49 #include <linux/llist.h>
50 #include <linux/bitmap.h>
51 #include <linux/percpu_ida.h>
52
53 #include "vhost.h"
54
55 #define TCM_VHOST_VERSION  "v0.1"
56 #define TCM_VHOST_NAMELEN 256
57 #define TCM_VHOST_MAX_CDB_SIZE 32
58 #define TCM_VHOST_DEFAULT_TAGS 256
59 #define TCM_VHOST_PREALLOC_SGLS 2048
60 #define TCM_VHOST_PREALLOC_UPAGES 2048
61 #define TCM_VHOST_PREALLOC_PROT_SGLS 512
62
63 struct vhost_scsi_inflight {
64         /* Wait for the flush operation to finish */
65         struct completion comp;
66         /* Refcount for the inflight reqs */
67         struct kref kref;
68 };
69
70 struct tcm_vhost_cmd {
71         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
72         int tvc_vq_desc;
73         /* virtio-scsi initiator task attribute */
74         int tvc_task_attr;
75         /* virtio-scsi response incoming iovecs */
76         int tvc_in_iovs;
77         /* virtio-scsi initiator data direction */
78         enum dma_data_direction tvc_data_direction;
79         /* Expected data transfer length from virtio-scsi header */
80         u32 tvc_exp_data_len;
81         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
82         u64 tvc_tag;
83         /* The number of scatterlists associated with this cmd */
84         u32 tvc_sgl_count;
85         u32 tvc_prot_sgl_count;
86         /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
87         u32 tvc_lun;
88         /* Pointer to the SGL formatted memory from virtio-scsi */
89         struct scatterlist *tvc_sgl;
90         struct scatterlist *tvc_prot_sgl;
91         struct page **tvc_upages;
92         /* Pointer to response header iovec */
93         struct iovec *tvc_resp_iov;
94         /* Pointer to vhost_scsi for our device */
95         struct vhost_scsi *tvc_vhost;
96         /* Pointer to vhost_virtqueue for the cmd */
97         struct vhost_virtqueue *tvc_vq;
98         /* Pointer to vhost nexus memory */
99         struct tcm_vhost_nexus *tvc_nexus;
100         /* The TCM I/O descriptor that is accessed via container_of() */
101         struct se_cmd tvc_se_cmd;
102         /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
103         struct work_struct work;
104         /* Copy of the incoming SCSI command descriptor block (CDB) */
105         unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
106         /* Sense buffer that will be mapped into outgoing status */
107         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
108         /* Completed commands list, serviced from vhost worker thread */
109         struct llist_node tvc_completion_list;
110         /* Used to track inflight cmd */
111         struct vhost_scsi_inflight *inflight;
112 };
113
114 struct tcm_vhost_nexus {
115         /* Pointer to TCM session for I_T Nexus */
116         struct se_session *tvn_se_sess;
117 };
118
119 struct tcm_vhost_nacl {
120         /* Binary World Wide unique Port Name for Vhost Initiator port */
121         u64 iport_wwpn;
122         /* ASCII formatted WWPN for SAS Initiator port */
123         char iport_name[TCM_VHOST_NAMELEN];
124         /* Returned by tcm_vhost_make_nodeacl() */
125         struct se_node_acl se_node_acl;
126 };
127
128 struct tcm_vhost_tpg {
129         /* Vhost port target portal group tag for TCM */
130         u16 tport_tpgt;
131         /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
132         int tv_tpg_port_count;
133         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
134         int tv_tpg_vhost_count;
135         /* list for tcm_vhost_list */
136         struct list_head tv_tpg_list;
137         /* Used to protect access for tpg_nexus */
138         struct mutex tv_tpg_mutex;
139         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
140         struct tcm_vhost_nexus *tpg_nexus;
141         /* Pointer back to tcm_vhost_tport */
142         struct tcm_vhost_tport *tport;
143         /* Returned by tcm_vhost_make_tpg() */
144         struct se_portal_group se_tpg;
145         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
146         struct vhost_scsi *vhost_scsi;
147 };
148
149 struct tcm_vhost_tport {
150         /* SCSI protocol the tport is providing */
151         u8 tport_proto_id;
152         /* Binary World Wide unique Port Name for Vhost Target port */
153         u64 tport_wwpn;
154         /* ASCII formatted WWPN for Vhost Target port */
155         char tport_name[TCM_VHOST_NAMELEN];
156         /* Returned by tcm_vhost_make_tport() */
157         struct se_wwn tport_wwn;
158 };
159
160 struct tcm_vhost_evt {
161         /* event to be sent to guest */
162         struct virtio_scsi_event event;
163         /* event list, serviced from vhost worker thread */
164         struct llist_node list;
165 };
166
167 enum {
168         VHOST_SCSI_VQ_CTL = 0,
169         VHOST_SCSI_VQ_EVT = 1,
170         VHOST_SCSI_VQ_IO = 2,
171 };
172
173 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
174 enum {
175         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
176                                                (1ULL << VIRTIO_SCSI_F_T10_PI)
177 };
178
179 #define VHOST_SCSI_MAX_TARGET   256
180 #define VHOST_SCSI_MAX_VQ       128
181 #define VHOST_SCSI_MAX_EVENT    128
182
183 struct vhost_scsi_virtqueue {
184         struct vhost_virtqueue vq;
185         /*
186          * Reference counting for inflight reqs, used for the flush operation.
187          * At any given time one counter tracks newly submitted commands, while
188          * we wait for the other one to drain to zero.
189          */
190         struct vhost_scsi_inflight inflights[2];
191         /*
192          * Indicate current inflight in use, protected by vq->mutex.
193          * Writers must also take dev mutex and flush under it.
194          */
195         int inflight_idx;
196 };
197
198 struct vhost_scsi {
199         /* Protected by vhost_scsi->dev.mutex */
200         struct tcm_vhost_tpg **vs_tpg;
201         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
202
203         struct vhost_dev dev;
204         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
205
206         struct vhost_work vs_completion_work; /* cmd completion work item */
207         struct llist_head vs_completion_list; /* cmd completion queue */
208
209         struct vhost_work vs_event_work; /* evt injection work item */
210         struct llist_head vs_event_list; /* evt injection queue */
211
212         bool vs_events_missed; /* any missed events, protected by vq->mutex */
213         int vs_events_nr; /* num of pending events, protected by vq->mutex */
214 };
215
216 /* Local pointer to allocated TCM configfs fabric module */
217 static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
218
219 static struct workqueue_struct *tcm_vhost_workqueue;
220
221 /* Global mutex to protect tcm_vhost TPG list for vhost IOCTL access */
222 static DEFINE_MUTEX(tcm_vhost_mutex);
223 static LIST_HEAD(tcm_vhost_list);
224
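/*
 * Return the number of pages spanned by the userspace range
 * [iov_base, iov_base + iov_len).
 */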
225 static int iov_num_pages(void __user *iov_base, size_t iov_len)
226 {
227         return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
228                ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
229 }
230
231 static void tcm_vhost_done_inflight(struct kref *kref)
232 {
233         struct vhost_scsi_inflight *inflight;
234
235         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
236         complete(&inflight->comp);
237 }
238
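/*
 * Flip every virtqueue over to its unused inflights[] slot so that new
 * requests take references on a fresh counter, and (optionally) hand the
 * old counters back via old_inflight[] so vhost_scsi_flush() can wait for
 * them to drain to zero.
 */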
239 static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
240                                     struct vhost_scsi_inflight *old_inflight[])
241 {
242         struct vhost_scsi_inflight *new_inflight;
243         struct vhost_virtqueue *vq;
244         int idx, i;
245
246         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
247                 vq = &vs->vqs[i].vq;
248
249                 mutex_lock(&vq->mutex);
250
251                 /* store old inflight */
252                 idx = vs->vqs[i].inflight_idx;
253                 if (old_inflight)
254                         old_inflight[i] = &vs->vqs[i].inflights[idx];
255
256                 /* set up new inflight */
257                 vs->vqs[i].inflight_idx = idx ^ 1;
258                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
259                 kref_init(&new_inflight->kref);
260                 init_completion(&new_inflight->comp);
261
262                 mutex_unlock(&vq->mutex);
263         }
264 }
265
266 static struct vhost_scsi_inflight *
267 tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
268 {
269         struct vhost_scsi_inflight *inflight;
270         struct vhost_scsi_virtqueue *svq;
271
272         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
273         inflight = &svq->inflights[svq->inflight_idx];
274         kref_get(&inflight->kref);
275
276         return inflight;
277 }
278
279 static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
280 {
281         kref_put(&inflight->kref, tcm_vhost_done_inflight);
282 }
283
284 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
285 {
286         return 1;
287 }
288
289 static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
290 {
291         return 0;
292 }
293
294 static char *tcm_vhost_get_fabric_name(void)
295 {
296         return "vhost";
297 }
298
299 static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
300 {
301         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
302                                 struct tcm_vhost_tpg, se_tpg);
303         struct tcm_vhost_tport *tport = tpg->tport;
304
305         switch (tport->tport_proto_id) {
306         case SCSI_PROTOCOL_SAS:
307                 return sas_get_fabric_proto_ident(se_tpg);
308         case SCSI_PROTOCOL_FCP:
309                 return fc_get_fabric_proto_ident(se_tpg);
310         case SCSI_PROTOCOL_ISCSI:
311                 return iscsi_get_fabric_proto_ident(se_tpg);
312         default:
313                 pr_err("Unknown tport_proto_id: 0x%02x, using"
314                         " SAS emulation\n", tport->tport_proto_id);
315                 break;
316         }
317
318         return sas_get_fabric_proto_ident(se_tpg);
319 }
320
321 static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
322 {
323         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
324                                 struct tcm_vhost_tpg, se_tpg);
325         struct tcm_vhost_tport *tport = tpg->tport;
326
327         return &tport->tport_name[0];
328 }
329
330 static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
331 {
332         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
333                                 struct tcm_vhost_tpg, se_tpg);
334         return tpg->tport_tpgt;
335 }
336
337 static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
338 {
339         return 1;
340 }
341
342 static u32
343 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
344                               struct se_node_acl *se_nacl,
345                               struct t10_pr_registration *pr_reg,
346                               int *format_code,
347                               unsigned char *buf)
348 {
349         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
350                                 struct tcm_vhost_tpg, se_tpg);
351         struct tcm_vhost_tport *tport = tpg->tport;
352
353         switch (tport->tport_proto_id) {
354         case SCSI_PROTOCOL_SAS:
355                 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
356                                         format_code, buf);
357         case SCSI_PROTOCOL_FCP:
358                 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
359                                         format_code, buf);
360         case SCSI_PROTOCOL_ISCSI:
361                 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
362                                         format_code, buf);
363         default:
364                 pr_err("Unknown tport_proto_id: 0x%02x, using"
365                         " SAS emulation\n", tport->tport_proto_id);
366                 break;
367         }
368
369         return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
370                         format_code, buf);
371 }
372
373 static u32
374 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
375                                   struct se_node_acl *se_nacl,
376                                   struct t10_pr_registration *pr_reg,
377                                   int *format_code)
378 {
379         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
380                                 struct tcm_vhost_tpg, se_tpg);
381         struct tcm_vhost_tport *tport = tpg->tport;
382
383         switch (tport->tport_proto_id) {
384         case SCSI_PROTOCOL_SAS:
385                 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
386                                         format_code);
387         case SCSI_PROTOCOL_FCP:
388                 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
389                                         format_code);
390         case SCSI_PROTOCOL_ISCSI:
391                 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
392                                         format_code);
393         default:
394                 pr_err("Unknown tport_proto_id: 0x%02x, using"
395                         " SAS emulation\n", tport->tport_proto_id);
396                 break;
397         }
398
399         return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
400                         format_code);
401 }
402
403 static char *
404 tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
405                                     const char *buf,
406                                     u32 *out_tid_len,
407                                     char **port_nexus_ptr)
408 {
409         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
410                                 struct tcm_vhost_tpg, se_tpg);
411         struct tcm_vhost_tport *tport = tpg->tport;
412
413         switch (tport->tport_proto_id) {
414         case SCSI_PROTOCOL_SAS:
415                 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
416                                         port_nexus_ptr);
417         case SCSI_PROTOCOL_FCP:
418                 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
419                                         port_nexus_ptr);
420         case SCSI_PROTOCOL_ISCSI:
421                 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
422                                         port_nexus_ptr);
423         default:
424                 pr_err("Unknown tport_proto_id: 0x%02x, using"
425                         " SAS emulation\n", tport->tport_proto_id);
426                 break;
427         }
428
429         return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
430                         port_nexus_ptr);
431 }
432
433 static struct se_node_acl *
434 tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
435 {
436         struct tcm_vhost_nacl *nacl;
437
438         nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
439         if (!nacl) {
440                 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
441                 return NULL;
442         }
443
444         return &nacl->se_node_acl;
445 }
446
447 static void
448 tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
449                              struct se_node_acl *se_nacl)
450 {
451         struct tcm_vhost_nacl *nacl = container_of(se_nacl,
452                         struct tcm_vhost_nacl, se_node_acl);
453         kfree(nacl);
454 }
455
456 static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
457 {
458         return 1;
459 }
460
461 static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
462 {
463         struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
464                                 struct tcm_vhost_cmd, tvc_se_cmd);
465         struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
466         int i;
467
468         if (tv_cmd->tvc_sgl_count) {
469                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
470                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
471         }
472         if (tv_cmd->tvc_prot_sgl_count) {
473                 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
474                         put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
475         }
476
477         tcm_vhost_put_inflight(tv_cmd->inflight);
478         percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
479 }
480
481 static int tcm_vhost_shutdown_session(struct se_session *se_sess)
482 {
483         return 0;
484 }
485
486 static void tcm_vhost_close_session(struct se_session *se_sess)
487 {
488         return;
489 }
490
491 static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
492 {
493         return 0;
494 }
495
496 static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
497 {
498         /* Go ahead and process the write immediately */
499         target_execute_cmd(se_cmd);
500         return 0;
501 }
502
503 static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
504 {
505         return 0;
506 }
507
508 static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
509 {
510         return;
511 }
512
513 static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
514 {
515         return 0;
516 }
517
518 static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
519 {
520         return 0;
521 }
522
523 static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
524 {
525         struct vhost_scsi *vs = cmd->tvc_vhost;
526
527         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
528
529         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
530 }
531
532 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
533 {
534         struct tcm_vhost_cmd *cmd = container_of(se_cmd,
535                                 struct tcm_vhost_cmd, tvc_se_cmd);
536         vhost_scsi_complete_cmd(cmd);
537         return 0;
538 }
539
540 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
541 {
542         struct tcm_vhost_cmd *cmd = container_of(se_cmd,
543                                 struct tcm_vhost_cmd, tvc_se_cmd);
544         vhost_scsi_complete_cmd(cmd);
545         return 0;
546 }
547
548 static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
549 {
550         return;
551 }
552
553 static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
554 {
555         return;
556 }
557
558 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
559 {
560         vs->vs_events_nr--;
561         kfree(evt);
562 }
563
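/*
 * Allocate a hotplug/hotunplug event to be queued to the guest.  The number
 * of pending events is capped at VHOST_SCSI_MAX_EVENT; past that point, or
 * on allocation failure, the event is dropped and vs_events_missed is set so
 * the guest can later be told that events were missed.
 */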
564 static struct tcm_vhost_evt *
565 tcm_vhost_allocate_evt(struct vhost_scsi *vs,
566                        u32 event, u32 reason)
567 {
568         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
569         struct tcm_vhost_evt *evt;
570
571         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
572                 vs->vs_events_missed = true;
573                 return NULL;
574         }
575
576         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
577         if (!evt) {
578                 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
579                 vs->vs_events_missed = true;
580                 return NULL;
581         }
582
583         evt->event.event = cpu_to_vhost32(vq, event);
584         evt->event.reason = cpu_to_vhost32(vq, reason);
585         vs->vs_events_nr++;
586
587         return evt;
588 }
589
590 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
591 {
592         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
593
594         /* TODO locking against target/backend threads? */
595         transport_generic_free_cmd(se_cmd, 0);
596
597 }
598
599 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
600 {
601         return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
602 }
603
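/*
 * Copy a single virtio_scsi_event into a guest-supplied buffer on the event
 * virtqueue.  If no descriptor is available, or the buffer is malformed, the
 * event is recorded as missed instead.
 */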
604 static void
605 tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
606 {
607         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
608         struct virtio_scsi_event *event = &evt->event;
609         struct virtio_scsi_event __user *eventp;
610         unsigned out, in;
611         int head, ret;
612
613         if (!vq->private_data) {
614                 vs->vs_events_missed = true;
615                 return;
616         }
617
618 again:
619         vhost_disable_notify(&vs->dev, vq);
620         head = vhost_get_vq_desc(vq, vq->iov,
621                         ARRAY_SIZE(vq->iov), &out, &in,
622                         NULL, NULL);
623         if (head < 0) {
624                 vs->vs_events_missed = true;
625                 return;
626         }
627         if (head == vq->num) {
628                 if (vhost_enable_notify(&vs->dev, vq))
629                         goto again;
630                 vs->vs_events_missed = true;
631                 return;
632         }
633
634         if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
635                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
636                                 vq->iov[out].iov_len);
637                 vs->vs_events_missed = true;
638                 return;
639         }
640
641         if (vs->vs_events_missed) {
642                 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
643                 vs->vs_events_missed = false;
644         }
645
646         eventp = vq->iov[out].iov_base;
647         ret = __copy_to_user(eventp, event, sizeof(*event));
648         if (!ret)
649                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
650         else
651                 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
652 }
653
654 static void tcm_vhost_evt_work(struct vhost_work *work)
655 {
656         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
657                                         vs_event_work);
658         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
659         struct tcm_vhost_evt *evt;
660         struct llist_node *llnode;
661
662         mutex_lock(&vq->mutex);
663         llnode = llist_del_all(&vs->vs_event_list);
664         while (llnode) {
665                 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
666                 llnode = llist_next(llnode);
667                 tcm_vhost_do_evt_work(vs, evt);
668                 tcm_vhost_free_evt(vs, evt);
669         }
670         mutex_unlock(&vq->mutex);
671 }
672
673 /* Fill in status and signal that we are done processing this command
674  *
675  * This is scheduled in the vhost work queue so we are called with the owner
676  * process mm and can access the vring.
677  */
678 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
679 {
680         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
681                                         vs_completion_work);
682         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
683         struct virtio_scsi_cmd_resp v_rsp;
684         struct tcm_vhost_cmd *cmd;
685         struct llist_node *llnode;
686         struct se_cmd *se_cmd;
687         struct iov_iter iov_iter;
688         int ret, vq;
689
690         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
691         llnode = llist_del_all(&vs->vs_completion_list);
692         while (llnode) {
693                 cmd = llist_entry(llnode, struct tcm_vhost_cmd,
694                                      tvc_completion_list);
695                 llnode = llist_next(llnode);
696                 se_cmd = &cmd->tvc_se_cmd;
697
698                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
699                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
700
701                 memset(&v_rsp, 0, sizeof(v_rsp));
702                 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
703                 /* TODO is status_qualifier field needed? */
704                 v_rsp.status = se_cmd->scsi_status;
705                 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
706                                                  se_cmd->scsi_sense_length);
707                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
708                        se_cmd->scsi_sense_length);
709
710                 iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
711                               cmd->tvc_in_iovs, sizeof(v_rsp));
712                 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
713                 if (likely(ret == sizeof(v_rsp))) {
714                         struct vhost_scsi_virtqueue *q;
715                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
716                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
717                         vq = q - vs->vqs;
718                         __set_bit(vq, signal);
719                 } else
720                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
721
722                 vhost_scsi_free_cmd(cmd);
723         }
724
725         vq = -1;
726         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
727                 < VHOST_SCSI_MAX_VQ)
728                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
729 }
730
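/*
 * Grab a pre-allocated tcm_vhost_cmd descriptor from the session tag pool.
 * The sgl, prot_sgl and upages arrays are pre-allocated elsewhere, so their
 * pointers are preserved across the memset() before the descriptor is
 * reinitialized for the new request.
 */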
731 static struct tcm_vhost_cmd *
732 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
733                    unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
734                    u32 exp_data_len, int data_direction)
735 {
736         struct tcm_vhost_cmd *cmd;
737         struct tcm_vhost_nexus *tv_nexus;
738         struct se_session *se_sess;
739         struct scatterlist *sg, *prot_sg;
740         struct page **pages;
741         int tag;
742
743         tv_nexus = tpg->tpg_nexus;
744         if (!tv_nexus) {
745                 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
746                 return ERR_PTR(-EIO);
747         }
748         se_sess = tv_nexus->tvn_se_sess;
749
750         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
751         if (tag < 0) {
752                 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
753                 return ERR_PTR(-ENOMEM);
754         }
755
756         cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
757         sg = cmd->tvc_sgl;
758         prot_sg = cmd->tvc_prot_sgl;
759         pages = cmd->tvc_upages;
760         memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
761
762         cmd->tvc_sgl = sg;
763         cmd->tvc_prot_sgl = prot_sg;
764         cmd->tvc_upages = pages;
765         cmd->tvc_se_cmd.map_tag = tag;
766         cmd->tvc_tag = scsi_tag;
767         cmd->tvc_lun = lun;
768         cmd->tvc_task_attr = task_attr;
769         cmd->tvc_exp_data_len = exp_data_len;
770         cmd->tvc_data_direction = data_direction;
771         cmd->tvc_nexus = tv_nexus;
772         cmd->inflight = tcm_vhost_get_inflight(vq);
773
774         memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);
775
776         return cmd;
777 }
778
779 /*
780  * Map a user memory range into a scatterlist
781  *
782  * Returns the number of scatterlist entries used or -errno on error.
783  */
784 static int
785 vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *cmd,
786                       void __user *ptr,
787                       size_t len,
788                       struct scatterlist *sgl,
789                       bool write)
790 {
791         unsigned int npages = 0, offset, nbytes;
792         unsigned int pages_nr = iov_num_pages(ptr, len);
793         struct scatterlist *sg = sgl;
794         struct page **pages = cmd->tvc_upages;
795         int ret, i;
796
797         if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
798                 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
799                        " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
800                         pages_nr, TCM_VHOST_PREALLOC_UPAGES);
801                 return -ENOBUFS;
802         }
803
804         ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
805         /* Error: no pages were pinned */
806         if (ret < 0)
807                 goto out;
808         /* Fewer pages pinned than requested */
809         if (ret != pages_nr) {
810                 for (i = 0; i < ret; i++)
811                         put_page(pages[i]);
812                 ret = -EFAULT;
813                 goto out;
814         }
815
816         while (len > 0) {
817                 offset = (uintptr_t)ptr & ~PAGE_MASK;
818                 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
819                 sg_set_page(sg, pages[npages], nbytes, offset);
820                 ptr += nbytes;
821                 len -= nbytes;
822                 sg++;
823                 npages++;
824         }
825
826 out:
827         return ret;
828 }
829
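/*
 * Work out how many scatterlist entries are needed to cover @bytes of the
 * iov_iter, and fail if that exceeds the pre-allocated @max_sgls.
 */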
830 static int
831 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
832 {
833         int sgl_count = 0;
834
835         if (!iter || !iter->iov) {
836                 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
837                        " present\n", __func__, bytes);
838                 return -EINVAL;
839         }
840
841         sgl_count = iov_iter_npages(iter, 0xffff);
842         if (sgl_count > max_sgls) {
843                 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
844                        " max_sgls: %d\n", __func__, sgl_count, max_sgls);
845                 return -EINVAL;
846         }
847         return sgl_count;
848 }
849
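/*
 * Walk each iovec segment of @iter and pin its user pages into @sg via
 * vhost_scsi_map_to_sgl().  On failure, any pages already installed in the
 * scatterlist are released before returning the error.
 */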
850 static int
851 vhost_scsi_iov_to_sgl(struct tcm_vhost_cmd *cmd, bool write,
852                       struct iov_iter *iter, struct scatterlist *sg,
853                       int sg_count)
854 {
855         size_t off = iter->iov_offset;
856         int i, ret;
857
858         for (i = 0; i < iter->nr_segs; i++) {
859                 void __user *base = iter->iov[i].iov_base + off;
860                 size_t len = iter->iov[i].iov_len - off;
861
862                 ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
863                 if (ret < 0) {
864                         for (i = 0; i < sg_count; i++) {
865                                 struct page *page = sg_page(&sg[i]);
866                                 if (page)
867                                         put_page(page);
868                         }
869                         return ret;
870                 }
871                 sg += ret;
872                 off = 0;
873         }
874         return 0;
875 }
876
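/*
 * Map the (optional) T10-PI protection bytes and then the data payload from
 * their iov_iters into the command's pre-allocated protection and data
 * scatterlists.
 */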
877 static int
878 vhost_scsi_mapal(struct tcm_vhost_cmd *cmd,
879                  size_t prot_bytes, struct iov_iter *prot_iter,
880                  size_t data_bytes, struct iov_iter *data_iter)
881 {
882         int sgl_count, ret;
883         bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
884
885         if (prot_bytes) {
886                 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
887                                                  TCM_VHOST_PREALLOC_PROT_SGLS);
888                 if (sgl_count < 0)
889                         return sgl_count;
890
891                 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
892                 cmd->tvc_prot_sgl_count = sgl_count;
893                 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
894                          cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
895
896                 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
897                                             cmd->tvc_prot_sgl,
898                                             cmd->tvc_prot_sgl_count);
899                 if (ret < 0) {
900                         cmd->tvc_prot_sgl_count = 0;
901                         return ret;
902                 }
903         }
904         sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
905                                          TCM_VHOST_PREALLOC_SGLS);
906         if (sgl_count < 0)
907                 return sgl_count;
908
909         sg_init_table(cmd->tvc_sgl, sgl_count);
910         cmd->tvc_sgl_count = sgl_count;
911         pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
912                   cmd->tvc_sgl, cmd->tvc_sgl_count);
913
914         ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
915                                     cmd->tvc_sgl, cmd->tvc_sgl_count);
916         if (ret < 0) {
917                 cmd->tvc_sgl_count = 0;
918                 return ret;
919         }
920         return 0;
921 }
922
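/*
 * Workqueue handler that dispatches a queued command descriptor into the TCM
 * core via target_submit_cmd_map_sgls(); on failure a CHECK CONDITION with
 * LOGICAL UNIT COMMUNICATION FAILURE sense is returned instead.
 */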
923 static void tcm_vhost_submission_work(struct work_struct *work)
924 {
925         struct tcm_vhost_cmd *cmd =
926                 container_of(work, struct tcm_vhost_cmd, work);
927         struct tcm_vhost_nexus *tv_nexus;
928         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
929         struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
930         int rc;
931
932         /* FIXME: BIDI operation */
933         if (cmd->tvc_sgl_count) {
934                 sg_ptr = cmd->tvc_sgl;
935
936                 if (cmd->tvc_prot_sgl_count)
937                         sg_prot_ptr = cmd->tvc_prot_sgl;
938                 else
939                         se_cmd->prot_pto = true;
940         } else {
941                 sg_ptr = NULL;
942         }
943         tv_nexus = cmd->tvc_nexus;
944
945         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
946                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
947                         cmd->tvc_lun, cmd->tvc_exp_data_len,
948                         cmd->tvc_task_attr, cmd->tvc_data_direction,
949                         TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
950                         NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
951         if (rc < 0) {
952                 transport_send_check_condition_and_sense(se_cmd,
953                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
954                 transport_generic_free_cmd(se_cmd, 0);
955         }
956 }
957
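/*
 * Complete a request directly with VIRTIO_SCSI_S_BAD_TARGET by copying a
 * zeroed response into the guest's response buffer and signalling the vq.
 */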
958 static void
959 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
960                            struct vhost_virtqueue *vq,
961                            int head, unsigned out)
962 {
963         struct virtio_scsi_cmd_resp __user *resp;
964         struct virtio_scsi_cmd_resp rsp;
965         int ret;
966
967         memset(&rsp, 0, sizeof(rsp));
968         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
969         resp = vq->iov[out].iov_base;
970         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
971         if (!ret)
972                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
973         else
974                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
975 }
976
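/*
 * Request-queue kick handler, written to cope with ANY_LAYOUT style guests:
 * the virtio_scsi_cmd_req(_pi) header is consumed through an iov_iter with
 * copy_from_iter(), so it may span several iovecs or share an iovec with the
 * outgoing WRITE payload, and the data direction is derived by comparing the
 * total out/in iovec lengths against the request and response header sizes.
 * For example, a WRITE might arrive laid out as:
 *
 *   out iovecs: [ virtio_scsi_cmd_req | data... ]  (header and payload may
 *                                                   share iovecs)
 *   in  iovecs: [ virtio_scsi_cmd_resp ]
 */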
977 static void
978 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
979 {
980         struct tcm_vhost_tpg **vs_tpg, *tpg;
981         struct virtio_scsi_cmd_req v_req;
982         struct virtio_scsi_cmd_req_pi v_req_pi;
983         struct tcm_vhost_cmd *cmd;
984         struct iov_iter out_iter, in_iter, prot_iter, data_iter;
985         u64 tag;
986         u32 exp_data_len, data_direction;
987         unsigned out, in;
988         int head, ret, prot_bytes;
989         size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
990         size_t out_size, in_size;
991         u16 lun;
992         u8 *target, *lunp, task_attr;
993         bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
994         void *req, *cdb;
995
996         mutex_lock(&vq->mutex);
997         /*
998          * We can handle the vq only after the endpoint is setup by calling the
999          * VHOST_SCSI_SET_ENDPOINT ioctl.
1000          */
1001         vs_tpg = vq->private_data;
1002         if (!vs_tpg)
1003                 goto out;
1004
1005         vhost_disable_notify(&vs->dev, vq);
1006
1007         for (;;) {
1008                 head = vhost_get_vq_desc(vq, vq->iov,
1009                                          ARRAY_SIZE(vq->iov), &out, &in,
1010                                          NULL, NULL);
1011                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
1012                          head, out, in);
1013                 /* On error, stop handling until the next kick. */
1014                 if (unlikely(head < 0))
1015                         break;
1016                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
1017                 if (head == vq->num) {
1018                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
1019                                 vhost_disable_notify(&vs->dev, vq);
1020                                 continue;
1021                         }
1022                         break;
1023                 }
1024                 /*
1025                  * Check for a sane response buffer so we can report early
1026                  * errors back to the guest.
1027                  */
1028                 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
1029                         vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
1030                                 " size, got %zu bytes\n", vq->iov[out].iov_len);
1031                         break;
1032                 }
1033                 /*
1034                  * Setup pointers and values based upon different virtio-scsi
1035                  * request header if T10_PI is enabled in KVM guest.
1036                  */
1037                 if (t10_pi) {
1038                         req = &v_req_pi;
1039                         req_size = sizeof(v_req_pi);
1040                         lunp = &v_req_pi.lun[0];
1041                         target = &v_req_pi.lun[1];
1042                 } else {
1043                         req = &v_req;
1044                         req_size = sizeof(v_req);
1045                         lunp = &v_req.lun[0];
1046                         target = &v_req.lun[1];
1047                 }
1048                 /*
1049                  * FIXME: Not correct for BIDI operation
1050                  */
1051                 out_size = iov_length(vq->iov, out);
1052                 in_size = iov_length(&vq->iov[out], in);
1053
1054                 /*
1055                  * Copy over the virtio-scsi request header, which for an
1056                  * ANY_LAYOUT enabled guest may span multiple iovecs, or a
1057                  * single iovec may contain both the header + outgoing
1058                  * WRITE payloads.
1059                  *
1060                  * copy_from_iter() will advance out_iter, so that it will
1061                  * point at the start of the outgoing WRITE payload, if
1062                  * DMA_TO_DEVICE is set.
1063                  */
1064                 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
1065
1066                 ret = copy_from_iter(req, req_size, &out_iter);
1067                 if (unlikely(ret != req_size)) {
1068                         vq_err(vq, "Faulted on copy_from_iter\n");
1069                         vhost_scsi_send_bad_target(vs, vq, head, out);
1070                         continue;
1071                 }
1072                 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1073                 if (unlikely(*lunp != 1)) {
1074                         vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
1075                         vhost_scsi_send_bad_target(vs, vq, head, out);
1076                         continue;
1077                 }
1078
1079                 tpg = ACCESS_ONCE(vs_tpg[*target]);
1080                 if (unlikely(!tpg)) {
1081                         /* Target does not exist, fail the request */
1082                         vhost_scsi_send_bad_target(vs, vq, head, out);
1083                         continue;
1084                 }
1085                 /*
1086                  * Determine data_direction by comparing the total outgoing
1087                  * iovec size against the virtio-scsi request header, and the
1088                  * total incoming iovec size against the response header.
1089                  *
1090                  * For DMA_TO_DEVICE this is out_iter, which is already pointing
1091                  * to the right place.
1092                  *
1093                  * For DMA_FROM_DEVICE, the iovec will be just past the end
1094                  * of the virtio-scsi response header in either the same
1095                  * or immediately following iovec.
1096                  *
1097                  * Any associated T10_PI bytes for the outgoing / incoming
1098                  * payloads are included in calculation of exp_data_len here.
1099                  */
1100                 prot_bytes = 0;
1101
1102                 if (out_size > req_size) {
1103                         data_direction = DMA_TO_DEVICE;
1104                         exp_data_len = out_size - req_size;
1105                         data_iter = out_iter;
1106                 } else if (in_size > rsp_size) {
1107                         data_direction = DMA_FROM_DEVICE;
1108                         exp_data_len = in_size - rsp_size;
1109
1110                         iov_iter_init(&in_iter, READ, &vq->iov[out], in,
1111                                       rsp_size + exp_data_len);
1112                         iov_iter_advance(&in_iter, rsp_size);
1113                         data_iter = in_iter;
1114                 } else {
1115                         data_direction = DMA_NONE;
1116                         exp_data_len = 0;
1117                 }
1118                 /*
1119                  * If T10_PI header + payload is present, setup prot_iter values
1120                  * and recalculate data_iter for vhost_scsi_mapal() mapping to
1121                  * host scatterlists via get_user_pages_fast().
1122                  */
1123                 if (t10_pi) {
1124                         if (v_req_pi.pi_bytesout) {
1125                                 if (data_direction != DMA_TO_DEVICE) {
1126                                         vq_err(vq, "Received non zero pi_bytesout,"
1127                                                 " but wrong data_direction\n");
1128                                         vhost_scsi_send_bad_target(vs, vq, head, out);
1129                                         continue;
1130                                 }
1131                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1132                         } else if (v_req_pi.pi_bytesin) {
1133                                 if (data_direction != DMA_FROM_DEVICE) {
1134                                         vq_err(vq, "Received non zero pi_bytesin,"
1135                                                 " but wrong data_direction\n");
1136                                         vhost_scsi_send_bad_target(vs, vq, head, out);
1137                                         continue;
1138                                 }
1139                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1140                         }
1141                         /*
1142                          * Set prot_iter to data_iter, and advance past any
1143                          * preceding prot_bytes that may be present.
1144                          *
1145                          * Also fix up the exp_data_len to reflect only the
1146                          * actual data payload length.
1147                          */
1148                         if (prot_bytes) {
1149                                 exp_data_len -= prot_bytes;
1150                                 prot_iter = data_iter;
1151                                 iov_iter_advance(&data_iter, prot_bytes);
1152                         }
1153                         tag = vhost64_to_cpu(vq, v_req_pi.tag);
1154                         task_attr = v_req_pi.task_attr;
1155                         cdb = &v_req_pi.cdb[0];
1156                         lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1157                 } else {
1158                         tag = vhost64_to_cpu(vq, v_req.tag);
1159                         task_attr = v_req.task_attr;
1160                         cdb = &v_req.cdb[0];
1161                         lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1162                 }
1163                 /*
1164                  * Check that the received CDB size does not exceed our
1165                  * hardcoded max for vhost-scsi, then get a pre-allocated
1166                  * cmd descriptor for the new virtio-scsi tag.
1167                  *
1168                  * TODO what if cdb was too small for varlen cdb header?
1169                  */
1170                 if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
1171                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1172                                 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1173                                 scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
1174                         vhost_scsi_send_bad_target(vs, vq, head, out);
1175                         continue;
1176                 }
1177                 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1178                                          exp_data_len + prot_bytes,
1179                                          data_direction);
1180                 if (IS_ERR(cmd)) {
1181                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1182                                PTR_ERR(cmd));
1183                         vhost_scsi_send_bad_target(vs, vq, head, out);
1184                         continue;
1185                 }
1186                 cmd->tvc_vhost = vs;
1187                 cmd->tvc_vq = vq;
1188                 cmd->tvc_resp_iov = &vq->iov[out];
1189                 cmd->tvc_in_iovs = in;
1190
1191                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1192                          cmd->tvc_cdb[0], cmd->tvc_lun);
1193                 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1194                          " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1195
1196                 if (data_direction != DMA_NONE) {
1197                         ret = vhost_scsi_mapal(cmd,
1198                                                prot_bytes, &prot_iter,
1199                                                exp_data_len, &data_iter);
1200                         if (unlikely(ret)) {
1201                                 vq_err(vq, "Failed to map iov to sgl\n");
1202                                 tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
1203                                 vhost_scsi_send_bad_target(vs, vq, head, out);
1204                                 continue;
1205                         }
1206                 }
1207                 /*
1208                  * Save the descriptor from vhost_get_vq_desc() to be used to
1209                  * complete the virtio-scsi request in TCM callback context via
1210                  * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
1211                  */
1212                 cmd->tvc_vq_desc = head;
1213                 /*
1214                  * Dispatch cmd descriptor for cmwq execution in process
1215                  * context provided by tcm_vhost_workqueue.  This also ensures
1216                  * cmd is executed on the same kworker CPU as this vhost
1217                  * thread to gain positive L2 cache locality effects.
1218                  */
1219                 INIT_WORK(&cmd->work, tcm_vhost_submission_work);
1220                 queue_work(tcm_vhost_workqueue, &cmd->work);
1221         }
1222 out:
1223         mutex_unlock(&vq->mutex);
1224 }
1225
1226 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1227 {
1228         pr_debug("%s: The handling func for control queue.\n", __func__);
1229 }
1230
1231 static void
1232 tcm_vhost_send_evt(struct vhost_scsi *vs,
1233                    struct tcm_vhost_tpg *tpg,
1234                    struct se_lun *lun,
1235                    u32 event,
1236                    u32 reason)
1237 {
1238         struct tcm_vhost_evt *evt;
1239
1240         evt = tcm_vhost_allocate_evt(vs, event, reason);
1241         if (!evt)
1242                 return;
1243
1244         if (tpg && lun) {
1245                 /* TODO: share lun setup code with virtio-scsi.ko */
1246                 /*
1247                  * Note: evt->event is zeroed when we allocate it and
1248                  * lun[4-7] need to be zero according to virtio-scsi spec.
1249                  */
1250                 evt->event.lun[0] = 0x01;
1251                 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1252                 if (lun->unpacked_lun >= 256)
1253                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1254                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1255         }
1256
1257         llist_add(&evt->list, &vs->vs_event_list);
1258         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1259 }
1260
1261 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1262 {
1263         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1264                                                 poll.work);
1265         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1266
1267         mutex_lock(&vq->mutex);
1268         if (!vq->private_data)
1269                 goto out;
1270
1271         if (vs->vs_events_missed)
1272                 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1273 out:
1274         mutex_unlock(&vq->mutex);
1275 }
1276
1277 static void vhost_scsi_handle_kick(struct vhost_work *work)
1278 {
1279         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1280                                                 poll.work);
1281         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1282
1283         vhost_scsi_handle_vq(vs, vq);
1284 }
1285
1286 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1287 {
1288         vhost_poll_flush(&vs->vqs[index].vq.poll);
1289 }
1290
1291 /* Callers must hold dev mutex */
1292 static void vhost_scsi_flush(struct vhost_scsi *vs)
1293 {
1294         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1295         int i;
1296
1297         /* Init new inflight and remember the old inflight */
1298         tcm_vhost_init_inflight(vs, old_inflight);
1299
1300         /*
1301          * The inflight->kref was initialized to 1. We decrement it here to
1302          * indicate the start of the flush operation so that it will reach 0
1303          * when all the reqs are finished.
1304          */
1305         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1306                 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1307
1308         /* Flush both the vhost poll and vhost work */
1309         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1310                 vhost_scsi_flush_vq(vs, i);
1311         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1312         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1313
1314         /* Wait for all reqs issued before the flush to be finished */
1315         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1316                 wait_for_completion(&old_inflight[i]->comp);
1317 }
1318
1319 /*
1320  * Called from vhost_scsi_ioctl() context to walk the list of available
1321  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
1322  *
1323  *  The lock nesting rule is:
1324  *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1325  */
1326 static int
1327 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1328                         struct vhost_scsi_target *t)
1329 {
1330         struct se_portal_group *se_tpg;
1331         struct tcm_vhost_tport *tv_tport;
1332         struct tcm_vhost_tpg *tpg;
1333         struct tcm_vhost_tpg **vs_tpg;
1334         struct vhost_virtqueue *vq;
1335         int index, ret, i, len;
1336         bool match = false;
1337
1338         mutex_lock(&tcm_vhost_mutex);
1339         mutex_lock(&vs->dev.mutex);
1340
1341         /* Verify that ring has been setup correctly. */
1342         for (index = 0; index < vs->dev.nvqs; ++index) {
1343                 /* Verify that ring has been setup correctly. */
1344                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1345                         ret = -EFAULT;
1346                         goto out;
1347                 }
1348         }
1349
1350         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1351         vs_tpg = kzalloc(len, GFP_KERNEL);
1352         if (!vs_tpg) {
1353                 ret = -ENOMEM;
1354                 goto out;
1355         }
1356         if (vs->vs_tpg)
1357                 memcpy(vs_tpg, vs->vs_tpg, len);
1358
1359         list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
1360                 mutex_lock(&tpg->tv_tpg_mutex);
1361                 if (!tpg->tpg_nexus) {
1362                         mutex_unlock(&tpg->tv_tpg_mutex);
1363                         continue;
1364                 }
1365                 if (tpg->tv_tpg_vhost_count != 0) {
1366                         mutex_unlock(&tpg->tv_tpg_mutex);
1367                         continue;
1368                 }
1369                 tv_tport = tpg->tport;
1370
1371                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1372                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1373                                 kfree(vs_tpg);
1374                                 mutex_unlock(&tpg->tv_tpg_mutex);
1375                                 ret = -EEXIST;
1376                                 goto out;
1377                         }
1378                         /*
1379                          * In order to ensure individual vhost-scsi configfs
1380                          * groups cannot be removed while in use by vhost ioctl,
1381                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1382                          * dependency now.
1383                          */
1384                         se_tpg = &tpg->se_tpg;
1385                         ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
1386                                                    &se_tpg->tpg_group.cg_item);
1387                         if (ret) {
1388                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1389                                 kfree(vs_tpg);
1390                                 mutex_unlock(&tpg->tv_tpg_mutex);
1391                                 goto out;
1392                         }
1393                         tpg->tv_tpg_vhost_count++;
1394                         tpg->vhost_scsi = vs;
1395                         vs_tpg[tpg->tport_tpgt] = tpg;
1396                         smp_mb__after_atomic();
1397                         match = true;
1398                 }
1399                 mutex_unlock(&tpg->tv_tpg_mutex);
1400         }
1401
1402         if (match) {
1403                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1404                        sizeof(vs->vs_vhost_wwpn));
1405                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1406                         vq = &vs->vqs[i].vq;
1407                         mutex_lock(&vq->mutex);
1408                         vq->private_data = vs_tpg;
1409                         vhost_init_used(vq);
1410                         mutex_unlock(&vq->mutex);
1411                 }
1412                 ret = 0;
1413         } else {
1414                 ret = -EEXIST;
1415         }
1416
1417         /*
1418          * Act as synchronize_rcu to make sure access to
1419          * old vs->vs_tpg is finished.
1420          */
1421         vhost_scsi_flush(vs);
1422         kfree(vs->vs_tpg);
1423         vs->vs_tpg = vs_tpg;
1424
1425 out:
1426         mutex_unlock(&vs->dev.mutex);
1427         mutex_unlock(&tcm_vhost_mutex);
1428         return ret;
1429 }
1430
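/*
 * Reverse of vhost_scsi_set_endpoint(): detach any tcm_vhost_tpg whose
 * tport_name matches t->vhost_wwpn, drop the configfs dependency taken at
 * set_endpoint time, clear each vq->private_data, and flush outstanding
 * requests before freeing the old vs->vs_tpg array.
 */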
1431 static int
1432 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1433                           struct vhost_scsi_target *t)
1434 {
1435         struct se_portal_group *se_tpg;
1436         struct tcm_vhost_tport *tv_tport;
1437         struct tcm_vhost_tpg *tpg;
1438         struct vhost_virtqueue *vq;
1439         bool match = false;
1440         int index, ret, i;
1441         u8 target;
1442
1443         mutex_lock(&tcm_vhost_mutex);
1444         mutex_lock(&vs->dev.mutex);
1445         /* Verify that ring has been setup correctly. */
1446         for (index = 0; index < vs->dev.nvqs; ++index) {
1447                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1448                         ret = -EFAULT;
1449                         goto err_dev;
1450                 }
1451         }
1452
1453         if (!vs->vs_tpg) {
1454                 ret = 0;
1455                 goto err_dev;
1456         }
1457
1458         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1459                 target = i;
1460                 tpg = vs->vs_tpg[target];
1461                 if (!tpg)
1462                         continue;
1463
1464                 mutex_lock(&tpg->tv_tpg_mutex);
1465                 tv_tport = tpg->tport;
1466                 if (!tv_tport) {
1467                         ret = -ENODEV;
1468                         goto err_tpg;
1469                 }
1470
1471                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1472                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1473                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1474                                 tv_tport->tport_name, tpg->tport_tpgt,
1475                                 t->vhost_wwpn, t->vhost_tpgt);
1476                         ret = -EINVAL;
1477                         goto err_tpg;
1478                 }
1479                 tpg->tv_tpg_vhost_count--;
1480                 tpg->vhost_scsi = NULL;
1481                 vs->vs_tpg[target] = NULL;
1482                 match = true;
1483                 mutex_unlock(&tpg->tv_tpg_mutex);
1484                 /*
1485                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1486                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1487                  */
1488                 se_tpg = &tpg->se_tpg;
1489                 configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
1490                                        &se_tpg->tpg_group.cg_item);
1491         }
1492         if (match) {
1493                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1494                         vq = &vs->vqs[i].vq;
1495                         mutex_lock(&vq->mutex);
1496                         vq->private_data = NULL;
1497                         mutex_unlock(&vq->mutex);
1498                 }
1499         }
1500         /*
1501          * Act as synchronize_rcu to make sure access to
1502          * old vs->vs_tpg is finished.
1503          */
1504         vhost_scsi_flush(vs);
1505         kfree(vs->vs_tpg);
1506         vs->vs_tpg = NULL;
1507         WARN_ON(vs->vs_events_nr);
1508         mutex_unlock(&vs->dev.mutex);
1509         mutex_unlock(&tcm_vhost_mutex);
1510         return 0;
1511
1512 err_tpg:
1513         mutex_unlock(&tpg->tv_tpg_mutex);
1514 err_dev:
1515         mutex_unlock(&vs->dev.mutex);
1516         mutex_unlock(&tcm_vhost_mutex);
1517         return ret;
1518 }
1519
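/*
 * Validate and accept a feature bitmap from the VHOST_SET_FEATURES ioctl:
 * reject bits outside VHOST_SCSI_FEATURES, require log access when
 * VHOST_F_LOG_ALL is requested, then publish the acked features to every
 * virtqueue under vq->mutex.
 */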
1520 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1521 {
1522         struct vhost_virtqueue *vq;
1523         int i;
1524
1525         if (features & ~VHOST_SCSI_FEATURES)
1526                 return -EOPNOTSUPP;
1527
1528         mutex_lock(&vs->dev.mutex);
1529         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1530             !vhost_log_access_ok(&vs->dev)) {
1531                 mutex_unlock(&vs->dev.mutex);
1532                 return -EFAULT;
1533         }
1534
1535         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1536                 vq = &vs->vqs[i].vq;
1537                 mutex_lock(&vq->mutex);
1538                 vq->acked_features = features;
1539                 mutex_unlock(&vq->mutex);
1540         }
1541         mutex_unlock(&vs->dev.mutex);
1542         return 0;
1543 }
1544
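/*
 * Per-open state setup: allocate struct vhost_scsi (falling back to
 * vzalloc() for large allocations), wire up the control/event/IO virtqueue
 * kick handlers and the completion/event work items, then initialize the
 * vhost_dev and the inflight tracking.
 */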
1545 static int vhost_scsi_open(struct inode *inode, struct file *f)
1546 {
1547         struct vhost_scsi *vs;
1548         struct vhost_virtqueue **vqs;
1549         int r = -ENOMEM, i;
1550
1551         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1552         if (!vs) {
1553                 vs = vzalloc(sizeof(*vs));
1554                 if (!vs)
1555                         goto err_vs;
1556         }
1557
1558         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1559         if (!vqs)
1560                 goto err_vqs;
1561
1562         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1563         vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1564
1565         vs->vs_events_nr = 0;
1566         vs->vs_events_missed = false;
1567
1568         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1569         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1570         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1571         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1572         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1573                 vqs[i] = &vs->vqs[i].vq;
1574                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1575         }
1576         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1577
1578         tcm_vhost_init_inflight(vs, NULL);
1579
1580         f->private_data = vs;
1581         return 0;
1582
1583 err_vqs:
1584         kvfree(vs);
1585 err_vs:
1586         return r;
1587 }
1588
1589 static int vhost_scsi_release(struct inode *inode, struct file *f)
1590 {
1591         struct vhost_scsi *vs = f->private_data;
1592         struct vhost_scsi_target t;
1593
1594         mutex_lock(&vs->dev.mutex);
1595         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1596         mutex_unlock(&vs->dev.mutex);
1597         vhost_scsi_clear_endpoint(vs, &t);
1598         vhost_dev_stop(&vs->dev);
1599         vhost_dev_cleanup(&vs->dev, false);
1600         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1601         vhost_scsi_flush(vs);
1602         kfree(vs->dev.vqs);
1603         kvfree(vs);
1604         return 0;
1605 }
1606
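/*
 * Top-level ioctl dispatch for the /dev/vhost-scsi character device.
 * Purely as an illustration (exact ordering is up to the VMM, e.g. QEMU,
 * and the WWPN below is hypothetical), a userspace setup sequence might
 * look roughly like:
 *
 *	fd = open("/dev/vhost-scsi", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);	  /* handled via vhost_dev_ioctl() */
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	/* ... VHOST_SET_VRING_* calls to set up each virtqueue ... */
 *	strcpy(backend.vhost_wwpn, "naa.xxxxxxxxxxxxxxxx");
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend);
 */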
1607 static long
1608 vhost_scsi_ioctl(struct file *f,
1609                  unsigned int ioctl,
1610                  unsigned long arg)
1611 {
1612         struct vhost_scsi *vs = f->private_data;
1613         struct vhost_scsi_target backend;
1614         void __user *argp = (void __user *)arg;
1615         u64 __user *featurep = argp;
1616         u32 __user *eventsp = argp;
1617         u32 events_missed;
1618         u64 features;
1619         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1620         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1621
1622         switch (ioctl) {
1623         case VHOST_SCSI_SET_ENDPOINT:
1624                 if (copy_from_user(&backend, argp, sizeof backend))
1625                         return -EFAULT;
1626                 if (backend.reserved != 0)
1627                         return -EOPNOTSUPP;
1628
1629                 return vhost_scsi_set_endpoint(vs, &backend);
1630         case VHOST_SCSI_CLEAR_ENDPOINT:
1631                 if (copy_from_user(&backend, argp, sizeof backend))
1632                         return -EFAULT;
1633                 if (backend.reserved != 0)
1634                         return -EOPNOTSUPP;
1635
1636                 return vhost_scsi_clear_endpoint(vs, &backend);
1637         case VHOST_SCSI_GET_ABI_VERSION:
1638                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1639                         return -EFAULT;
1640                 return 0;
1641         case VHOST_SCSI_SET_EVENTS_MISSED:
1642                 if (get_user(events_missed, eventsp))
1643                         return -EFAULT;
1644                 mutex_lock(&vq->mutex);
1645                 vs->vs_events_missed = events_missed;
1646                 mutex_unlock(&vq->mutex);
1647                 return 0;
1648         case VHOST_SCSI_GET_EVENTS_MISSED:
1649                 mutex_lock(&vq->mutex);
1650                 events_missed = vs->vs_events_missed;
1651                 mutex_unlock(&vq->mutex);
1652                 if (put_user(events_missed, eventsp))
1653                         return -EFAULT;
1654                 return 0;
1655         case VHOST_GET_FEATURES:
1656                 features = VHOST_SCSI_FEATURES;
1657                 if (copy_to_user(featurep, &features, sizeof features))
1658                         return -EFAULT;
1659                 return 0;
1660         case VHOST_SET_FEATURES:
1661                 if (copy_from_user(&features, featurep, sizeof features))
1662                         return -EFAULT;
1663                 return vhost_scsi_set_features(vs, features);
1664         default:
1665                 mutex_lock(&vs->dev.mutex);
1666                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1667                 /* TODO: flush backend after dev ioctl. */
1668                 if (r == -ENOIOCTLCMD)
1669                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1670                 mutex_unlock(&vs->dev.mutex);
1671                 return r;
1672         }
1673 }
1674
1675 #ifdef CONFIG_COMPAT
1676 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1677                                 unsigned long arg)
1678 {
1679         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1680 }
1681 #endif
1682
1683 static const struct file_operations vhost_scsi_fops = {
1684         .owner          = THIS_MODULE,
1685         .release        = vhost_scsi_release,
1686         .unlocked_ioctl = vhost_scsi_ioctl,
1687 #ifdef CONFIG_COMPAT
1688         .compat_ioctl   = vhost_scsi_compat_ioctl,
1689 #endif
1690         .open           = vhost_scsi_open,
1691         .llseek         = noop_llseek,
1692 };
1693
1694 static struct miscdevice vhost_scsi_misc = {
1695         MISC_DYNAMIC_MINOR,
1696         "vhost-scsi",
1697         &vhost_scsi_fops,
1698 };
1699
1700 static int __init vhost_scsi_register(void)
1701 {
1702         return misc_register(&vhost_scsi_misc);
1703 }
1704
1705 static int vhost_scsi_deregister(void)
1706 {
1707         return misc_deregister(&vhost_scsi_misc);
1708 }
1709
1710 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1711 {
1712         switch (tport->tport_proto_id) {
1713         case SCSI_PROTOCOL_SAS:
1714                 return "SAS";
1715         case SCSI_PROTOCOL_FCP:
1716                 return "FCP";
1717         case SCSI_PROTOCOL_ISCSI:
1718                 return "iSCSI";
1719         default:
1720                 break;
1721         }
1722
1723         return "Unknown";
1724 }
1725
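/*
 * Queue a VIRTIO_SCSI_T_TRANSPORT_RESET event for the guest (reason RESCAN
 * on plug, REMOVED on unplug), but only when the event virtqueue has
 * negotiated VIRTIO_SCSI_F_HOTPLUG.
 */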
1726 static void
1727 tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1728                   struct se_lun *lun, bool plug)
1729 {
1731         struct vhost_scsi *vs = tpg->vhost_scsi;
1732         struct vhost_virtqueue *vq;
1733         u32 reason;
1734
1735         if (!vs)
1736                 return;
1737
1738         mutex_lock(&vs->dev.mutex);
1739
1740         if (plug)
1741                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1742         else
1743                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1744
1745         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1746         mutex_lock(&vq->mutex);
1747         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1748                 tcm_vhost_send_evt(vs, tpg, lun,
1749                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1750         mutex_unlock(&vq->mutex);
1751         mutex_unlock(&vs->dev.mutex);
1752 }
1753
1754 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1755 {
1756         tcm_vhost_do_plug(tpg, lun, true);
1757 }
1758
1759 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1760 {
1761         tcm_vhost_do_plug(tpg, lun, false);
1762 }
1763
1764 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1765                                struct se_lun *lun)
1766 {
1767         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1768                                 struct tcm_vhost_tpg, se_tpg);
1769
1770         mutex_lock(&tcm_vhost_mutex);
1771
1772         mutex_lock(&tpg->tv_tpg_mutex);
1773         tpg->tv_tpg_port_count++;
1774         mutex_unlock(&tpg->tv_tpg_mutex);
1775
1776         tcm_vhost_hotplug(tpg, lun);
1777
1778         mutex_unlock(&tcm_vhost_mutex);
1779
1780         return 0;
1781 }
1782
1783 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1784                                   struct se_lun *lun)
1785 {
1786         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1787                                 struct tcm_vhost_tpg, se_tpg);
1788
1789         mutex_lock(&tcm_vhost_mutex);
1790
1791         mutex_lock(&tpg->tv_tpg_mutex);
1792         tpg->tv_tpg_port_count--;
1793         mutex_unlock(&tpg->tv_tpg_mutex);
1794
1795         tcm_vhost_hotunplug(tpg, lun);
1796
1797         mutex_unlock(&tcm_vhost_mutex);
1798 }
1799
1800 static struct se_node_acl *
1801 tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1802                        struct config_group *group,
1803                        const char *name)
1804 {
1805         struct se_node_acl *se_nacl, *se_nacl_new;
1806         struct tcm_vhost_nacl *nacl;
1807         u64 wwpn = 0;
1808         u32 nexus_depth;
1809
1810         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1811                 return ERR_PTR(-EINVAL); */
1812         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1813         if (!se_nacl_new)
1814                 return ERR_PTR(-ENOMEM);
1815
1816         nexus_depth = 1;
1817         /*
1818          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1819          * when converting a NodeACL from demo mode -> explicit
1820          */
1821         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1822                                 name, nexus_depth);
1823         if (IS_ERR(se_nacl)) {
1824                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1825                 return se_nacl;
1826         }
1827         /*
1828          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1829          */
1830         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1831         nacl->iport_wwpn = wwpn;
1832
1833         return se_nacl;
1834 }
1835
1836 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1837 {
1838         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1839                                 struct tcm_vhost_nacl, se_node_acl);
1840         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1841         kfree(nacl);
1842 }
1843
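/*
 * Free the per-command scatterlist, protection scatterlist and user page
 * arrays preallocated for every tag in the session's sess_cmd_map.
 */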
1844 static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1845                                        struct se_session *se_sess)
1846 {
1847         struct tcm_vhost_cmd *tv_cmd;
1848         unsigned int i;
1849
1850         if (!se_sess->sess_cmd_map)
1851                 return;
1852
1853         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1854                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1855
1856                 kfree(tv_cmd->tvc_sgl);
1857                 kfree(tv_cmd->tvc_prot_sgl);
1858                 kfree(tv_cmd->tvc_upages);
1859         }
1860 }
1861
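/*
 * Create the single I_T nexus for a TPG: allocate a tag-pooled se_session,
 * preallocate the SGL/prot-SGL/page arrays for each tcm_vhost_cmd, look up
 * a demo-mode node ACL for 'name', and register the session with TCM.
 */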
1862 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1863                                 const char *name)
1864 {
1865         struct se_portal_group *se_tpg;
1866         struct se_session *se_sess;
1867         struct tcm_vhost_nexus *tv_nexus;
1868         struct tcm_vhost_cmd *tv_cmd;
1869         unsigned int i;
1870
1871         mutex_lock(&tpg->tv_tpg_mutex);
1872         if (tpg->tpg_nexus) {
1873                 mutex_unlock(&tpg->tv_tpg_mutex);
1874                 pr_debug("tpg->tpg_nexus already exists\n");
1875                 return -EEXIST;
1876         }
1877         se_tpg = &tpg->se_tpg;
1878
1879         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1880         if (!tv_nexus) {
1881                 mutex_unlock(&tpg->tv_tpg_mutex);
1882                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1883                 return -ENOMEM;
1884         }
1885         /*
1886          *  Initialize the struct se_session pointer and setup tagpool
1887          *  for struct tcm_vhost_cmd descriptors
1888          */
1889         tv_nexus->tvn_se_sess = transport_init_session_tags(
1890                                         TCM_VHOST_DEFAULT_TAGS,
1891                                         sizeof(struct tcm_vhost_cmd),
1892                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1893         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1894                 mutex_unlock(&tpg->tv_tpg_mutex);
1895                 kfree(tv_nexus);
1896                 return -ENOMEM;
1897         }
1898         se_sess = tv_nexus->tvn_se_sess;
1899         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1900                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1901
1902                 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1903                                         TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
1904                 if (!tv_cmd->tvc_sgl) {
1905                         mutex_unlock(&tpg->tv_tpg_mutex);
1906                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1907                         goto out;
1908                 }
1909
1910                 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1911                                         TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
1912                 if (!tv_cmd->tvc_upages) {
1913                         mutex_unlock(&tpg->tv_tpg_mutex);
1914                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1915                         goto out;
1916                 }
1917
1918                 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1919                                         TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
1920                 if (!tv_cmd->tvc_prot_sgl) {
1921                         mutex_unlock(&tpg->tv_tpg_mutex);
1922                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1923                         goto out;
1924                 }
1925         }
1926         /*
1927          * Since we are running in 'demo mode' this call will generate a
1928          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1929          * the SCSI Initiator port name of the passed configfs group 'name'.
1930          */
1931         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1932                                 se_tpg, (unsigned char *)name);
1933         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1934                 mutex_unlock(&tpg->tv_tpg_mutex);
1935                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1936                                 " for %s\n", name);
1937                 goto out;
1938         }
1939         /*
1940          * Now register the TCM vhost virtual I_T Nexus as active with the
1941          * call to __transport_register_session()
1942          */
1943         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1944                         tv_nexus->tvn_se_sess, tv_nexus);
1945         tpg->tpg_nexus = tv_nexus;
1946
1947         mutex_unlock(&tpg->tv_tpg_mutex);
1948         return 0;
1949
1950 out:
1951         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1952         transport_free_session(se_sess);
1953         kfree(tv_nexus);
1954         return -ENOMEM;
1955 }
1956
1957 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1958 {
1959         struct se_session *se_sess;
1960         struct tcm_vhost_nexus *tv_nexus;
1961
1962         mutex_lock(&tpg->tv_tpg_mutex);
1963         tv_nexus = tpg->tpg_nexus;
1964         if (!tv_nexus) {
1965                 mutex_unlock(&tpg->tv_tpg_mutex);
1966                 return -ENODEV;
1967         }
1968
1969         se_sess = tv_nexus->tvn_se_sess;
1970         if (!se_sess) {
1971                 mutex_unlock(&tpg->tv_tpg_mutex);
1972                 return -ENODEV;
1973         }
1974
1975         if (tpg->tv_tpg_port_count != 0) {
1976                 mutex_unlock(&tpg->tv_tpg_mutex);
1977                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1978                         " active TPG port count: %d\n",
1979                         tpg->tv_tpg_port_count);
1980                 return -EBUSY;
1981         }
1982
1983         if (tpg->tv_tpg_vhost_count != 0) {
1984                 mutex_unlock(&tpg->tv_tpg_mutex);
1985                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1986                         " active TPG vhost count: %d\n",
1987                         tpg->tv_tpg_vhost_count);
1988                 return -EBUSY;
1989         }
1990
1991         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1992                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1993                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1994
1995         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1996         /*
1997          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1998          */
1999         transport_deregister_session(tv_nexus->tvn_se_sess);
2000         tpg->tpg_nexus = NULL;
2001         mutex_unlock(&tpg->tv_tpg_mutex);
2002
2003         kfree(tv_nexus);
2004         return 0;
2005 }
2006
2007 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
2008                                         char *page)
2009 {
2010         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2011                                 struct tcm_vhost_tpg, se_tpg);
2012         struct tcm_vhost_nexus *tv_nexus;
2013         ssize_t ret;
2014
2015         mutex_lock(&tpg->tv_tpg_mutex);
2016         tv_nexus = tpg->tpg_nexus;
2017         if (!tv_nexus) {
2018                 mutex_unlock(&tpg->tv_tpg_mutex);
2019                 return -ENODEV;
2020         }
2021         ret = snprintf(page, PAGE_SIZE, "%s\n",
2022                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2023         mutex_unlock(&tpg->tv_tpg_mutex);
2024
2025         return ret;
2026 }
2027
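/*
 * configfs 'nexus' store handler.  As an illustrative example (the paths
 * and WWN value below are hypothetical), the nexus is typically created
 * with something like:
 *
 *	echo -n naa.5001405041594c05 > \
 *		/sys/kernel/config/target/vhost/naa.5001405041594c05/tpgt_1/nexus
 *
 * and torn down again by writing the literal string "NULL".
 */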
2028 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
2029                                          const char *page,
2030                                          size_t count)
2031 {
2032         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2033                                 struct tcm_vhost_tpg, se_tpg);
2034         struct tcm_vhost_tport *tport_wwn = tpg->tport;
2035         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
2036         int ret;
2037         /*
2038          * Shutdown the active I_T nexus if 'NULL' is passed..
2039          */
2040         if (!strncmp(page, "NULL", 4)) {
2041                 ret = tcm_vhost_drop_nexus(tpg);
2042                 return (!ret) ? count : ret;
2043         }
2044         /*
2045          * Otherwise make sure the passed virtual Initiator port WWN matches
2046          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
2047          * tcm_vhost_make_nexus().
2048          */
2049         if (strlen(page) >= TCM_VHOST_NAMELEN) {
2050                 pr_err("Emulated NAA SAS Address: %s, exceeds"
2051                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
2052                 return -EINVAL;
2053         }
2054         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
2055
2056         ptr = strstr(i_port, "naa.");
2057         if (ptr) {
2058                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2059                         pr_err("Passed SAS Initiator Port %s does not"
2060                                 " match target port protoid: %s\n", i_port,
2061                                 tcm_vhost_dump_proto_id(tport_wwn));
2062                         return -EINVAL;
2063                 }
2064                 port_ptr = &i_port[0];
2065                 goto check_newline;
2066         }
2067         ptr = strstr(i_port, "fc.");
2068         if (ptr) {
2069                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2070                         pr_err("Passed FCP Initiator Port %s does not"
2071                                 " match target port protoid: %s\n", i_port,
2072                                 tcm_vhost_dump_proto_id(tport_wwn));
2073                         return -EINVAL;
2074                 }
2075                 port_ptr = &i_port[3]; /* Skip over "fc." */
2076                 goto check_newline;
2077         }
2078         ptr = strstr(i_port, "iqn.");
2079         if (ptr) {
2080                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2081                         pr_err("Passed iSCSI Initiator Port %s does not"
2082                                 " match target port protoid: %s\n", i_port,
2083                                 tcm_vhost_dump_proto_id(tport_wwn));
2084                         return -EINVAL;
2085                 }
2086                 port_ptr = &i_port[0];
2087                 goto check_newline;
2088         }
2089         pr_err("Unable to locate prefix for emulated Initiator Port:"
2090                         " %s\n", i_port);
2091         return -EINVAL;
2092         /*
2093          * Clear any trailing newline for the NAA WWN
2094          */
2095 check_newline:
2096         if (i_port[strlen(i_port)-1] == '\n')
2097                 i_port[strlen(i_port)-1] = '\0';
2098
2099         ret = tcm_vhost_make_nexus(tpg, port_ptr);
2100         if (ret < 0)
2101                 return ret;
2102
2103         return count;
2104 }
2105
2106 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
2107
2108 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
2109         &tcm_vhost_tpg_nexus.attr,
2110         NULL,
2111 };
2112
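/*
 * configfs mkdir handler for 'tpgt_<n>' directories under a vhost WWN:
 * parse the TPG tag, register the se_portal_group with TCM, and add the
 * new tpg to the global tcm_vhost_list.
 */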
2113 static struct se_portal_group *
2114 tcm_vhost_make_tpg(struct se_wwn *wwn,
2115                    struct config_group *group,
2116                    const char *name)
2117 {
2118         struct tcm_vhost_tport *tport = container_of(wwn,
2119                         struct tcm_vhost_tport, tport_wwn);
2120
2121         struct tcm_vhost_tpg *tpg;
2122         unsigned long tpgt;
2123         int ret;
2124
2125         if (strstr(name, "tpgt_") != name)
2126                 return ERR_PTR(-EINVAL);
2127         if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2128                 return ERR_PTR(-EINVAL);
2129
2130         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
2131         if (!tpg) {
2132                 pr_err("Unable to allocate struct tcm_vhost_tpg");
2133                 return ERR_PTR(-ENOMEM);
2134         }
2135         mutex_init(&tpg->tv_tpg_mutex);
2136         INIT_LIST_HEAD(&tpg->tv_tpg_list);
2137         tpg->tport = tport;
2138         tpg->tport_tpgt = tpgt;
2139
2140         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
2141                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
2142         if (ret < 0) {
2143                 kfree(tpg);
2144                 return NULL;
2145         }
2146         mutex_lock(&tcm_vhost_mutex);
2147         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
2148         mutex_unlock(&tcm_vhost_mutex);
2149
2150         return &tpg->se_tpg;
2151 }
2152
2153 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
2154 {
2155         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2156                                 struct tcm_vhost_tpg, se_tpg);
2157
2158         mutex_lock(&tcm_vhost_mutex);
2159         list_del(&tpg->tv_tpg_list);
2160         mutex_unlock(&tcm_vhost_mutex);
2161         /*
2162          * Release the virtual I_T Nexus for this vhost TPG
2163          */
2164         tcm_vhost_drop_nexus(tpg);
2165         /*
2166          * Deregister the se_tpg from TCM..
2167          */
2168         core_tpg_deregister(se_tpg);
2169         kfree(tpg);
2170 }
2171
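/*
 * configfs mkdir handler for the WWN level: derive the emulated protocol
 * (SAS for "naa.", FCP for "fc.", iSCSI for "iqn.") from the directory
 * name and record the target port name.
 */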
2172 static struct se_wwn *
2173 tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2174                      struct config_group *group,
2175                      const char *name)
2176 {
2177         struct tcm_vhost_tport *tport;
2178         char *ptr;
2179         u64 wwpn = 0;
2180         int off = 0;
2181
2182         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
2183                 return ERR_PTR(-EINVAL); */
2184
2185         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
2186         if (!tport) {
2187                 pr_err("Unable to allocate struct tcm_vhost_tport");
2188                 return ERR_PTR(-ENOMEM);
2189         }
2190         tport->tport_wwpn = wwpn;
2191         /*
2192          * Determine the emulated Protocol Identifier and Target Port Name
2193          * based on the incoming configfs directory name.
2194          */
2195         ptr = strstr(name, "naa.");
2196         if (ptr) {
2197                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2198                 goto check_len;
2199         }
2200         ptr = strstr(name, "fc.");
2201         if (ptr) {
2202                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2203                 off = 3; /* Skip over "fc." */
2204                 goto check_len;
2205         }
2206         ptr = strstr(name, "iqn.");
2207         if (ptr) {
2208                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2209                 goto check_len;
2210         }
2211
2212         pr_err("Unable to locate prefix for emulated Target Port:"
2213                         " %s\n", name);
2214         kfree(tport);
2215         return ERR_PTR(-EINVAL);
2216
2217 check_len:
2218         if (strlen(name) >= TCM_VHOST_NAMELEN) {
2219                 pr_err("Emulated %s Address: %s, exceeds"
2220                         " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
2221                         TCM_VHOST_NAMELEN);
2222                 kfree(tport);
2223                 return ERR_PTR(-EINVAL);
2224         }
2225         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
2226
2227         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2228                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
2229
2230         return &tport->tport_wwn;
2231 }
2232
2233 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
2234 {
2235         struct tcm_vhost_tport *tport = container_of(wwn,
2236                                 struct tcm_vhost_tport, tport_wwn);
2237
2238         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2239                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2240                 tport->tport_name);
2241
2242         kfree(tport);
2243 }
2244
2245 static ssize_t
2246 tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2247                                 char *page)
2248 {
2249         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2250                 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2251                 utsname()->machine);
2252 }
2253
2254 TF_WWN_ATTR_RO(tcm_vhost, version);
2255
2256 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2257         &tcm_vhost_wwn_version.attr,
2258         NULL,
2259 };
2260
2261 static struct target_core_fabric_ops tcm_vhost_ops = {
2262         .get_fabric_name                = tcm_vhost_get_fabric_name,
2263         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
2264         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
2265         .tpg_get_tag                    = tcm_vhost_get_tag,
2266         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2267         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2268         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2269         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2270         .tpg_check_demo_mode            = tcm_vhost_check_true,
2271         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2272         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2273         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2274         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2275         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2276         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2277         .release_cmd                    = tcm_vhost_release_cmd,
2278         .check_stop_free                = vhost_scsi_check_stop_free,
2279         .shutdown_session               = tcm_vhost_shutdown_session,
2280         .close_session                  = tcm_vhost_close_session,
2281         .sess_get_index                 = tcm_vhost_sess_get_index,
2282         .sess_get_initiator_sid         = NULL,
2283         .write_pending                  = tcm_vhost_write_pending,
2284         .write_pending_status           = tcm_vhost_write_pending_status,
2285         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2286         .get_task_tag                   = tcm_vhost_get_task_tag,
2287         .get_cmd_state                  = tcm_vhost_get_cmd_state,
2288         .queue_data_in                  = tcm_vhost_queue_data_in,
2289         .queue_status                   = tcm_vhost_queue_status,
2290         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2291         .aborted_task                   = tcm_vhost_aborted_task,
2292         /*
2293          * Setup callers for generic logic in target_core_fabric_configfs.c
2294          */
2295         .fabric_make_wwn                = tcm_vhost_make_tport,
2296         .fabric_drop_wwn                = tcm_vhost_drop_tport,
2297         .fabric_make_tpg                = tcm_vhost_make_tpg,
2298         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2299         .fabric_post_link               = tcm_vhost_port_link,
2300         .fabric_pre_unlink              = tcm_vhost_port_unlink,
2301         .fabric_make_np                 = NULL,
2302         .fabric_drop_np                 = NULL,
2303         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2304         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2305 };
2306
2307 static int tcm_vhost_register_configfs(void)
2308 {
2309         struct target_fabric_configfs *fabric;
2310         int ret;
2311
2312         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2313                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2314                 utsname()->machine);
2315         /*
2316          * Register the top level struct config_item_type with TCM core
2317          */
2318         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2319         if (IS_ERR(fabric)) {
2320                 pr_err("target_fabric_configfs_init() failed\n");
2321                 return PTR_ERR(fabric);
2322         }
2323         /*
2324          * Setup fabric->tf_ops from our local tcm_vhost_ops
2325          */
2326         fabric->tf_ops = tcm_vhost_ops;
2327         /*
2328          * Setup default attribute lists for various fabric->tf_cit_tmpl
2329          */
2330         fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2331         fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2332         fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2333         fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2334         fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2335         fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2336         fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2337         fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2338         fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2339         /*
2340          * Register the fabric for use within TCM
2341          */
2342         ret = target_fabric_configfs_register(fabric);
2343         if (ret < 0) {
2344                 pr_err("target_fabric_configfs_register() failed"
2345                                 " for TCM_VHOST\n");
2346                 return ret;
2347         }
2348         /*
2349          * Setup our local pointer to *fabric
2350          */
2351         tcm_vhost_fabric_configfs = fabric;
2352         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2353         return 0;
2354 }
2355
2356 static void tcm_vhost_deregister_configfs(void)
2357 {
2358         if (!tcm_vhost_fabric_configfs)
2359                 return;
2360
2361         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2362         tcm_vhost_fabric_configfs = NULL;
2363         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2364 }
2365
2366 static int __init tcm_vhost_init(void)
2367 {
2368         int ret = -ENOMEM;
2369         /*
2370          * Use our own dedicated workqueue for submitting I/O into
2371          * target core to avoid contention within system_wq.
2372          */
2373         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2374         if (!tcm_vhost_workqueue)
2375                 goto out;
2376
2377         ret = vhost_scsi_register();
2378         if (ret < 0)
2379                 goto out_destroy_workqueue;
2380
2381         ret = tcm_vhost_register_configfs();
2382         if (ret < 0)
2383                 goto out_vhost_scsi_deregister;
2384
2385         return 0;
2386
2387 out_vhost_scsi_deregister:
2388         vhost_scsi_deregister();
2389 out_destroy_workqueue:
2390         destroy_workqueue(tcm_vhost_workqueue);
2391 out:
2392         return ret;
2393 }
2394
2395 static void tcm_vhost_exit(void)
2396 {
2397         tcm_vhost_deregister_configfs();
2398         vhost_scsi_deregister();
2399         destroy_workqueue(tcm_vhost_workqueue);
2400 }
2401
2402 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2403 MODULE_ALIAS("tcm_vhost");
2404 MODULE_LICENSE("GPL");
2405 module_init(tcm_vhost_init);
2406 module_exit(tcm_vhost_exit);