/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include <linux/percpu_ida.h>

#include "vhost.h"

#define TCM_VHOST_VERSION  "v0.1"
#define TCM_VHOST_NAMELEN 256
#define TCM_VHOST_MAX_CDB_SIZE 32
#define TCM_VHOST_DEFAULT_TAGS 256
#define TCM_VHOST_PREALLOC_SGLS 2048
#define TCM_VHOST_PREALLOC_UPAGES 2048
#define TCM_VHOST_PREALLOC_PROT_SGLS 512
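
/*
 * The PREALLOC limits above bound the scatterlist and page-pointer
 * arrays carried by each pre-allocated tcm_vhost_cmd; the mapping
 * helpers (vhost_scsi_map_to_sgl() and friends) fail a request with
 * -ENOBUFS rather than grow these arrays at I/O time.
 */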

struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct tcm_vhost_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
        /* virtio-scsi response incoming iovecs */
        int tvc_in_iovs;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
        u32 tvc_exp_data_len;
        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
        u64 tvc_tag;
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
        /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
        /* Pointer to response header iovec */
        struct iovec *tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
        struct tcm_vhost_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
        /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
        unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
        struct llist_node tvc_completion_list;
        /* Used to track inflight cmd */
        struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
        /* Binary World Wide unique Port Name for Vhost Initiator port */
        u64 iport_wwpn;
        /* ASCII formatted WWPN for SAS Initiator port */
        char iport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_nodeacl() */
        struct se_node_acl se_node_acl;
};

struct tcm_vhost_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track the number of TPG Port/Lun links w.r.t. explicit I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
        /* list for tcm_vhost_list */
        struct list_head tv_tpg_list;
        /* Used to protect access to tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
        struct tcm_vhost_nexus *tpg_nexus;
        /* Pointer back to tcm_vhost_tport */
        struct tcm_vhost_tport *tport;
        /* Returned by tcm_vhost_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
        char tport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_tport() */
        struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
        struct llist_node list;
};

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
                                               (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for flush operation. At
         * any time, one slot counts newly submitted commands while a flush
         * waits for the other slot's count to reach zero.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicates the inflight slot currently in use, protected by
         * vq->mutex.  Writers must also take the dev mutex and flush under it.
         */
        int inflight_idx;
};

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct tcm_vhost_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex protecting the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

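/*
 * Number of pages spanned by a user buffer.  An iovec that starts
 * mid-page and crosses a page boundary touches two pages, e.g. with
 * 4K pages:
 *
 *   iov_base = 0x10800, iov_len = 0x1000
 *   PAGE_ALIGN(0x10800 + 0x1000)      = 0x12000
 *   0x10800 & PAGE_MASK               = 0x10000
 *   (0x12000 - 0x10000) >> PAGE_SHIFT = 2 pages
 */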
static int iov_num_pages(void __user *iov_base, size_t iov_len)
{
        return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
               ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

static void tcm_vhost_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

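/*
 * Flip each virtqueue over to a fresh inflight counter.  A flush in
 * vhost_scsi_flush() works roughly as follows:
 *
 *   tcm_vhost_init_inflight(vs, old);    /* new cmds ref the other slot */
 *   kref_put(&old[i]->kref, ...);        /* drop the initial reference */
 *   wait_for_completion(&old[i]->comp);  /* last old cmd completes it */
 *
 * so requests issued before the flush are fully drained while new
 * submissions proceed against the newly armed slot.
 */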
static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* store old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* set up new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
        return "vhost";
}

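/*
 * The fabric callbacks below dispatch on the transport protocol
 * configured for the tport (SAS, FCP or iSCSI) and fall back to SAS
 * emulation, with a warning, for unknown protocol IDs.
 */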
static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_FCP:
                return fc_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_fabric_proto_ident(se_tpg);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32
tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
                              struct se_node_acl *se_nacl,
                              struct t10_pr_registration *pr_reg,
                              int *format_code,
                              unsigned char *buf)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                        format_code, buf);
}

static u32
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
                                  struct se_node_acl *se_nacl,
                                  struct t10_pr_registration *pr_reg,
                                  int *format_code)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                        format_code);
}

static char *
tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
                                    const char *buf,
                                    u32 *out_tid_len,
                                    char **port_nexus_ptr)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_FCP:
                return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                        port_nexus_ptr);
}

static struct se_node_acl *
tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_nacl *nacl;

        nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
        if (!nacl) {
                pr_err("Unable to allocate struct tcm_vhost_nacl\n");
                return NULL;
        }

        return &nacl->se_node_acl;
}

static void
tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
                             struct se_node_acl *se_nacl)
{
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
                        struct tcm_vhost_nacl, se_node_acl);
        kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
        int i;

        if (tv_cmd->tvc_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
        }
        if (tv_cmd->tvc_prot_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
        }

        tcm_vhost_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
        return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
        return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
        return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
{
        struct vhost_scsi *vs = cmd->tvc_vhost;

        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
        return;
}

static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
{
        return;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

static struct tcm_vhost_evt *
tcm_vhost_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;

        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = cpu_to_vhost32(vq, event);
        evt->event.reason = cpu_to_vhost32(vq, reason);
        vs->vs_events_nr++;

        return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
{
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
        return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

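/*
 * Deliver one event to the guest's event virtqueue.  The
 * disable_notify/get_vq_desc/enable_notify sequence below is the usual
 * vhost pattern: if no buffer is available, re-enable guest
 * notifications and retry, which closes the race with the guest
 * posting a new buffer; if that still fails, record a missed event.
 */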
static void
tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vq->private_data) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
                evt = llist_entry(llnode, struct tcm_vhost_evt, list);
                llnode = llist_next(llnode);
                tcm_vhost_do_evt_work(vs, evt);
                tcm_vhost_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
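/*
 * Responses are drained from vs_completion_list in one batch: used
 * descriptors are posted per command, while guest notifications are
 * coalesced in the 'signal' bitmap and issued once per virtqueue at
 * the end.
 */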
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct tcm_vhost_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        struct iov_iter iov_iter;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
                cmd = llist_entry(llnode, struct tcm_vhost_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
                                                 se_cmd->scsi_sense_length);
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);

                iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
                              cmd->tvc_in_iovs, sizeof(v_rsp));
                ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
                if (likely(ret == sizeof(v_rsp))) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");

                vhost_scsi_free_cmd(cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

static struct tcm_vhost_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
{
        struct tcm_vhost_cmd *cmd;
        struct tcm_vhost_nexus *tv_nexus;
        struct se_session *se_sess;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
        int tag;

        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct tcm_vhost_nexus\n");
                return ERR_PTR(-EIO);
        }
        se_sess = tv_nexus->tvn_se_sess;

        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
                pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
        }

        cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
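        /*
         * The command descriptor is recycled from the session tag pool
         * with its scatterlists and page array already allocated: stash
         * those pointers, zero the rest of the descriptor, then restore
         * them.
         */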
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
        memset(cmd, 0, sizeof(struct tcm_vhost_cmd));

        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
        cmd->tvc_upages = pages;
        cmd->tvc_se_cmd.map_tag = tag;
        cmd->tvc_tag = scsi_tag;
        cmd->tvc_lun = lun;
        cmd->tvc_task_attr = task_attr;
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
        cmd->inflight = tcm_vhost_get_inflight(vq);

        memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);

        return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *cmd,
                      void __user *ptr,
                      size_t len,
                      struct scatterlist *sgl,
                      bool write)
{
        unsigned int npages = 0, offset, nbytes;
        unsigned int pages_nr = iov_num_pages(ptr, len);
        struct scatterlist *sg = sgl;
        struct page **pages = cmd->tvc_upages;
        int ret, i;

        if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
                       " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
                        pages_nr, TCM_VHOST_PREALLOC_UPAGES);
                return -ENOBUFS;
        }

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than requested */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        return ret;
}

static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
                          struct iovec *iov,
                          int niov,
                          bool write)
{
        struct scatterlist *sg = cmd->tvc_sgl;
        unsigned int sgl_count = 0;
        int ret, i;

        for (i = 0; i < niov; i++)
                sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len);

        if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
                pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
                        " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
                        sgl_count, TCM_VHOST_PREALLOC_SGLS);
                return -ENOBUFS;
        }

        pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
        sg_init_table(sg, sgl_count);
        cmd->tvc_sgl_count = sgl_count;

        pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);

        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, iov[i].iov_len,
                                            sg, write);
                if (ret < 0) {
                        for (i = 0; i < cmd->tvc_sgl_count; i++) {
                                struct page *page = sg_page(&cmd->tvc_sgl[i]);
                                if (page)
                                        put_page(page);
                        }
                        cmd->tvc_sgl_count = 0;
                        return ret;
                }
                sg += ret;
                sgl_count -= ret;
        }
        return 0;
}

static int
vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
                           struct iovec *iov,
                           int niov,
                           bool write)
{
        struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
        unsigned int prot_sgl_count = 0;
        int ret, i;

        for (i = 0; i < niov; i++)
                prot_sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len);

        if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
                pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
                        " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
                        prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
                return -ENOBUFS;
        }

        pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
                 prot_sg, prot_sgl_count);
        sg_init_table(prot_sg, prot_sgl_count);
        cmd->tvc_prot_sgl_count = prot_sgl_count;

        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, iov[i].iov_len,
                                            prot_sg, write);
                if (ret < 0) {
                        for (i = 0; i < cmd->tvc_prot_sgl_count; i++) {
                                struct page *page = sg_page(&cmd->tvc_prot_sgl[i]);
                                if (page)
                                        put_page(page);
                        }
                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
                prot_sg += ret;
                prot_sgl_count -= ret;
        }
        return 0;
}

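/*
 * Translate an iov_iter's payload into a scatterlist entry count and
 * check it against the per-command preallocation ceiling; returns the
 * count or -EINVAL.
 */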
static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
        int sgl_count = 0;

        if (!iter || !iter->iov) {
                pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
                       " present\n", __func__, bytes);
                return -EINVAL;
        }

        sgl_count = iov_iter_npages(iter, 0xffff);
        if (sgl_count > max_sgls) {
                pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
                       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
                return -EINVAL;
        }
        return sgl_count;
}

static int
vhost_scsi_iov_to_sgl(struct tcm_vhost_cmd *cmd, bool write,
                      struct iov_iter *iter, struct scatterlist *sg,
                      int sg_count)
{
        size_t off = iter->iov_offset;
        int i, ret;

        for (i = 0; i < iter->nr_segs; i++) {
                void __user *base = iter->iov[i].iov_base + off;
                size_t len = iter->iov[i].iov_len - off;

                ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
                if (ret < 0) {
                        for (i = 0; i < sg_count; i++) {
                                struct page *page = sg_page(&sg[i]);
                                if (page)
                                        put_page(page);
                        }
                        return ret;
                }
                sg += ret;
                off = 0;
        }
        return 0;
}

static int
vhost_scsi_mapal(struct tcm_vhost_cmd *cmd,
                 size_t prot_bytes, struct iov_iter *prot_iter,
                 size_t data_bytes, struct iov_iter *data_iter)
{
        int sgl_count, ret;
        bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

        if (prot_bytes) {
                sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
                                                 TCM_VHOST_PREALLOC_PROT_SGLS);
                if (sgl_count < 0)
                        return sgl_count;

                sg_init_table(cmd->tvc_prot_sgl, sgl_count);
                cmd->tvc_prot_sgl_count = sgl_count;
                pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
                         cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

                ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
                                            cmd->tvc_prot_sgl,
                                            cmd->tvc_prot_sgl_count);
                if (ret < 0) {
                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
        }
        sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
                                         TCM_VHOST_PREALLOC_SGLS);
        if (sgl_count < 0)
                return sgl_count;

        sg_init_table(cmd->tvc_sgl, sgl_count);
        cmd->tvc_sgl_count = sgl_count;
        pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
                  cmd->tvc_sgl, cmd->tvc_sgl_count);

        ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
                                    cmd->tvc_sgl, cmd->tvc_sgl_count);
        if (ret < 0) {
                cmd->tvc_sgl_count = 0;
                return ret;
        }
        return 0;
}

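/*
 * Dispatch a fully mapped command into the target core.  This runs in
 * cmwq process context (queued from vhost_scsi_handle_vq()), so
 * target_submit_cmd_map_sgls() may sleep; on setup failure a CHECK
 * CONDITION response is generated instead.
 */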
static void tcm_vhost_submission_work(struct work_struct *work)
{
        struct tcm_vhost_cmd *cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
        int rc;

        /* FIXME: BIDI operation */
        if (cmd->tvc_sgl_count) {
                sg_ptr = cmd->tvc_sgl;

                if (cmd->tvc_prot_sgl_count)
                        sg_prot_ptr = cmd->tvc_prot_sgl;
                else
                        se_cmd->prot_pto = true;
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = cmd->tvc_nexus;

        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        cmd->tvc_task_attr, cmd->tvc_data_direction,
                        TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
                        NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                           struct vhost_virtqueue *vq,
                           int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
        struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_cmd *cmd;
        u64 tag;
        u32 exp_data_len, data_first, data_num, data_direction, prot_first;
        unsigned out, in, i;
        int head, ret, data_niov, prot_niov, prot_bytes;
        size_t req_size;
        u16 lun;
        u8 *target, *lunp, task_attr;
        bool hdr_pi;
        void *req, *cdb;

        mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is set up by calling
         * the VHOST_SCSI_SET_ENDPOINT ioctl.
         */
        vs_tpg = vq->private_data;
        if (!vs_tpg)
                goto out;

        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
                                        ARRAY_SIZE(vq->iov), &out, &in,
                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }

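                /*
                 * Infer the data direction from the descriptor layout:
                 * the request header is always the first readable (out)
                 * iovec and the response the first writable (in) iovec,
                 * so any extra out iovecs carry write payload and any
                 * extra in iovecs carry read payload.
                 */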
                /* FIXME: BIDI operation */
                if (out == 1 && in == 1) {
                        data_direction = DMA_NONE;
                        data_first = 0;
                        data_num = 0;
                } else if (out == 1 && in > 1) {
                        data_direction = DMA_FROM_DEVICE;
                        data_first = out + 1;
                        data_num = in - 1;
                } else if (out > 1 && in == 1) {
                        data_direction = DMA_TO_DEVICE;
                        data_first = 1;
                        data_num = out - 1;
                } else {
                        vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
                                        out, in);
                        break;
                }

                /*
                 * Check for a sane resp buffer so we can report errors to
                 * the guest.
                 */
                if (unlikely(vq->iov[out].iov_len !=
                                        sizeof(struct virtio_scsi_cmd_resp))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
                                " bytes\n", vq->iov[out].iov_len);
                        break;
                }

                if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
                        req = &v_req_pi;
                        lunp = &v_req_pi.lun[0];
                        target = &v_req_pi.lun[1];
                        req_size = sizeof(v_req_pi);
                        hdr_pi = true;
                } else {
                        req = &v_req;
                        lunp = &v_req.lun[0];
                        target = &v_req.lun[1];
                        req_size = sizeof(v_req);
                        hdr_pi = false;
                }

                if (unlikely(vq->iov[0].iov_len < req_size)) {
                        pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
                               req_size, vq->iov[0].iov_len);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                if (unlikely(*lunp != 1)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                tpg = ACCESS_ONCE(vs_tpg[*target]);

                /* Target does not exist, fail the request */
                if (unlikely(!tpg)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                data_niov = data_num;
                prot_niov = prot_first = prot_bytes = 0;
                /*
                 * Determine if any protection information iovecs are preceding
                 * the actual data payload, and adjust data_first + data_niov
                 * values accordingly for vhost_scsi_map_iov_to_sgl() below.
                 *
                 * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
                 */
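                /*
                 * The LUN decoding below follows the virtio-scsi single
                 * level LUN format: lun[0] == 1, lun[1] == target,
                 * lun[2..3] == 0x4000 | lun, so masking with 0x3FFF
                 * recovers the 14-bit LUN (e.g. lun[2] = 0x40,
                 * lun[3] = 0x05 -> LUN 5).
                 */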
                if (hdr_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
                                        vq_err(vq, "Received non zero do_pi_niov"
                                                ", but wrong data_direction\n");
                                        vhost_scsi_send_bad_target(vs, vq, head, out);
                                        continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
                                        vq_err(vq, "Received non zero di_pi_niov"
                                                ", but wrong data_direction\n");
                                        vhost_scsi_send_bad_target(vs, vq, head, out);
                                        continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
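                        /*
                         * Count how many leading data iovecs it takes to
                         * cover prot_bytes; those carry the protection
                         * information and the remainder is the data
                         * payload proper.
                         */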
                        if (prot_bytes) {
                                int tmp = 0;

                                for (i = 0; i < data_num; i++) {
                                        tmp += vq->iov[data_first + i].iov_len;
                                        prot_niov++;
                                        if (tmp >= prot_bytes)
                                                break;
                                }
                                prot_first = data_first;
                                data_first += prot_niov;
                                data_niov = data_num - prot_niov;
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
                        task_attr = v_req_pi.task_attr;
                        cdb = &v_req_pi.cdb[0];
                        lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
                } else {
                        tag = vhost64_to_cpu(vq, v_req.tag);
                        task_attr = v_req.task_attr;
                        cdb = &v_req.cdb[0];
                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                }
                exp_data_len = 0;
                for (i = 0; i < data_niov; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for vhost-scsi
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
                if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
                                        PTR_ERR(cmd));
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
                        ": %d\n", cmd, exp_data_len, data_direction);

                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
                cmd->tvc_resp_iov = &vq->iov[out];
                cmd->tvc_in_iovs = in;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                        cmd->tvc_cdb[0], cmd->tvc_lun);

                if (prot_niov) {
                        ret = vhost_scsi_map_iov_to_prot(cmd,
                                        &vq->iov[prot_first], prot_niov,
                                        data_direction == DMA_FROM_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to"
                                        " prot_sgl\n");
                                tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
                                vhost_scsi_send_bad_target(vs, vq, head, out);
                                continue;
                        }
                }
                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(cmd,
                                        &vq->iov[data_first], data_niov,
                                        data_direction == DMA_FROM_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
                                vhost_scsi_send_bad_target(vs, vq, head, out);
                                continue;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
                 * Dispatch tv_cmd descriptor for cmwq execution in process
                 * context provided by tcm_vhost_workqueue.  This also ensures
                 * tv_cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&cmd->work, tcm_vhost_submission_work);
                queue_work(tcm_vhost_workqueue, &cmd->work);
        }
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void
tcm_vhost_send_evt(struct vhost_scsi *vs,
                   struct tcm_vhost_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
{
        struct tcm_vhost_evt *evt;

        evt = tcm_vhost_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
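                /*
                 * Address-encoding example: unpacked_lun 300 (0x12c)
                 * yields lun[2] = 0x41 (0x40 flag | high byte) and
                 * lun[3] = 0x2c; LUNs below 256 leave lun[2] at zero.
                 */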
1336                 evt->event.lun[0] = 0x01;
1337                 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1338                 if (lun->unpacked_lun >= 256)
1339                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
1340                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1341         }
1342
1343         llist_add(&evt->list, &vs->vs_event_list);
1344         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1345 }
1346
1347 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1348 {
1349         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1350                                                 poll.work);
1351         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1352
1353         mutex_lock(&vq->mutex);
1354         if (!vq->private_data)
1355                 goto out;
1356
1357         if (vs->vs_events_missed)
1358                 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1359 out:
1360         mutex_unlock(&vq->mutex);
1361 }
1362
1363 static void vhost_scsi_handle_kick(struct vhost_work *work)
1364 {
1365         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1366                                                 poll.work);
1367         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1368
1369         vhost_scsi_handle_vq(vs, vq);
1370 }
1371
1372 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1373 {
1374         vhost_poll_flush(&vs->vqs[index].vq.poll);
1375 }
1376
1377 /* Callers must hold dev mutex */
1378 static void vhost_scsi_flush(struct vhost_scsi *vs)
1379 {
1380         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1381         int i;
1382
1383         /* Init new inflight and remember the old inflight */
1384         tcm_vhost_init_inflight(vs, old_inflight);
1385
1386         /*
1387          * The inflight->kref was initialized to 1. We decrement it here to
1388          * indicate the start of the flush operation so that it will reach 0
1389          * when all the reqs are finished.
1390          */
1391         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1392                 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1393
1394         /* Flush both the vhost poll and vhost work */
1395         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1396                 vhost_scsi_flush_vq(vs, i);
1397         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1398         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1399
1400         /* Wait for all reqs issued before the flush to be finished */
1401         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1402                 wait_for_completion(&old_inflight[i]->comp);
1403 }
1404
1405 /*
1406  * Called from vhost_scsi_ioctl() context to walk the list of available
1407  * tcm_vhost_tpg entries that have an active struct tcm_vhost_nexus
1408  *
1409  *  The lock nesting rule is:
1410  *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1411  */
1412 static int
1413 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1414                         struct vhost_scsi_target *t)
1415 {
1416         struct se_portal_group *se_tpg;
1417         struct tcm_vhost_tport *tv_tport;
1418         struct tcm_vhost_tpg *tpg;
1419         struct tcm_vhost_tpg **vs_tpg;
1420         struct vhost_virtqueue *vq;
1421         int index, ret, i, len;
1422         bool match = false;
1423
1424         mutex_lock(&tcm_vhost_mutex);
1425         mutex_lock(&vs->dev.mutex);
1426
1427         /* Verify that the rings have been set up correctly. */
1428         for (index = 0; index < vs->dev.nvqs; ++index) {
1430                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1431                         ret = -EFAULT;
1432                         goto out;
1433                 }
1434         }
1435
1436         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1437         vs_tpg = kzalloc(len, GFP_KERNEL);
1438         if (!vs_tpg) {
1439                 ret = -ENOMEM;
1440                 goto out;
1441         }
1442         if (vs->vs_tpg)
1443                 memcpy(vs_tpg, vs->vs_tpg, len);
1444
1445         list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
1446                 mutex_lock(&tpg->tv_tpg_mutex);
1447                 if (!tpg->tpg_nexus) {
1448                         mutex_unlock(&tpg->tv_tpg_mutex);
1449                         continue;
1450                 }
1451                 if (tpg->tv_tpg_vhost_count != 0) {
1452                         mutex_unlock(&tpg->tv_tpg_mutex);
1453                         continue;
1454                 }
1455                 tv_tport = tpg->tport;
1456
1457                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1458                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1459                                 kfree(vs_tpg);
1460                                 mutex_unlock(&tpg->tv_tpg_mutex);
1461                                 ret = -EEXIST;
1462                                 goto out;
1463                         }
1464                         /*
1465                          * Take an explicit se_tpg->tpg_group.cg_item
1466                          * configfs dependency now so that this vhost-scsi
1467                          * configfs group cannot be removed while the
1468                          * vhost ioctl still has it in use.
1469                          */
1470                         se_tpg = &tpg->se_tpg;
1471                         ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
1472                                                    &se_tpg->tpg_group.cg_item);
1473                         if (ret) {
1474                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1475                                 kfree(vs_tpg);
1476                                 mutex_unlock(&tpg->tv_tpg_mutex);
1477                                 goto out;
1478                         }
1479                         tpg->tv_tpg_vhost_count++;
1480                         tpg->vhost_scsi = vs;
1481                         vs_tpg[tpg->tport_tpgt] = tpg;
1482                         smp_mb__after_atomic();
1483                         match = true;
1484                 }
1485                 mutex_unlock(&tpg->tv_tpg_mutex);
1486         }
1487
1488         if (match) {
1489                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1490                        sizeof(vs->vs_vhost_wwpn));
1491                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1492                         vq = &vs->vqs[i].vq;
1493                         mutex_lock(&vq->mutex);
1494                         vq->private_data = vs_tpg;
1495                         vhost_init_used(vq);
1496                         mutex_unlock(&vq->mutex);
1497                 }
1498                 ret = 0;
1499         } else {
1500                 ret = -EEXIST;
1501         }
1502
1503         /*
1504          * Acts like synchronize_rcu(): wait for all in-flight accesses
1505          * to the old vs->vs_tpg to finish before freeing it.
1506          */
1507         vhost_scsi_flush(vs);
1508         kfree(vs->vs_tpg);
1509         vs->vs_tpg = vs_tpg;
1510
1511 out:
1512         mutex_unlock(&vs->dev.mutex);
1513         mutex_unlock(&tcm_vhost_mutex);
1514         return ret;
1515 }
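/*
 * Example (illustrative, untested) of how userspace binds an endpoint,
 * after the usual vhost memory-table and vring setup; QEMU's vhost-scsi
 * backend performs the equivalent.  The WWPN is a placeholder for an
 * existing vhost configfs target port:
 *
 *      struct vhost_scsi_target t = { 0 };
 *      int fd = open("/dev/vhost-scsi", O_RDWR);
 *
 *      ioctl(fd, VHOST_SET_OWNER, NULL);
 *      snprintf(t.vhost_wwpn, sizeof(t.vhost_wwpn), "naa.5001405xxxxxxxxx");
 *      ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */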
1516
1517 static int
1518 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1519                           struct vhost_scsi_target *t)
1520 {
1521         struct se_portal_group *se_tpg;
1522         struct tcm_vhost_tport *tv_tport;
1523         struct tcm_vhost_tpg *tpg;
1524         struct vhost_virtqueue *vq;
1525         bool match = false;
1526         int index, ret, i;
1527         u8 target;
1528
1529         mutex_lock(&tcm_vhost_mutex);
1530         mutex_lock(&vs->dev.mutex);
1531         /* Verify that the rings have been set up correctly. */
1532         for (index = 0; index < vs->dev.nvqs; ++index) {
1533                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1534                         ret = -EFAULT;
1535                         goto err_dev;
1536                 }
1537         }
1538
1539         if (!vs->vs_tpg) {
1540                 ret = 0;
1541                 goto err_dev;
1542         }
1543
1544         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1545                 target = i;
1546                 tpg = vs->vs_tpg[target];
1547                 if (!tpg)
1548                         continue;
1549
1550                 mutex_lock(&tpg->tv_tpg_mutex);
1551                 tv_tport = tpg->tport;
1552                 if (!tv_tport) {
1553                         ret = -ENODEV;
1554                         goto err_tpg;
1555                 }
1556
1557                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1558                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1559                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1560                                 tv_tport->tport_name, tpg->tport_tpgt,
1561                                 t->vhost_wwpn, t->vhost_tpgt);
1562                         ret = -EINVAL;
1563                         goto err_tpg;
1564                 }
1565                 tpg->tv_tpg_vhost_count--;
1566                 tpg->vhost_scsi = NULL;
1567                 vs->vs_tpg[target] = NULL;
1568                 match = true;
1569                 mutex_unlock(&tpg->tv_tpg_mutex);
1570                 /*
1571                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1572                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1573                  */
1574                 se_tpg = &tpg->se_tpg;
1575                 configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
1576                                        &se_tpg->tpg_group.cg_item);
1577         }
1578         if (match) {
1579                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1580                         vq = &vs->vqs[i].vq;
1581                         mutex_lock(&vq->mutex);
1582                         vq->private_data = NULL;
1583                         mutex_unlock(&vq->mutex);
1584                 }
1585         }
1586         /*
1587          * Acts like synchronize_rcu(): wait for all in-flight accesses
1588          * to the old vs->vs_tpg to finish before dropping it.
1589          */
1590         vhost_scsi_flush(vs);
1591         kfree(vs->vs_tpg);
1592         vs->vs_tpg = NULL;
1593         WARN_ON(vs->vs_events_nr);
1594         mutex_unlock(&vs->dev.mutex);
1595         mutex_unlock(&tcm_vhost_mutex);
1596         return 0;
1597
1598 err_tpg:
1599         mutex_unlock(&tpg->tv_tpg_mutex);
1600 err_dev:
1601         mutex_unlock(&vs->dev.mutex);
1602         mutex_unlock(&tcm_vhost_mutex);
1603         return ret;
1604 }
1605
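/*
 * Validate and latch the feature bits negotiated via VHOST_SET_FEATURES:
 * bits outside VHOST_SCSI_FEATURES are rejected, and VHOST_F_LOG_ALL
 * additionally requires that the dirty log memory is accessible.
 */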
1606 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1607 {
1608         struct vhost_virtqueue *vq;
1609         int i;
1610
1611         if (features & ~VHOST_SCSI_FEATURES)
1612                 return -EOPNOTSUPP;
1613
1614         mutex_lock(&vs->dev.mutex);
1615         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1616             !vhost_log_access_ok(&vs->dev)) {
1617                 mutex_unlock(&vs->dev.mutex);
1618                 return -EFAULT;
1619         }
1620
1621         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1622                 vq = &vs->vqs[i].vq;
1623                 mutex_lock(&vq->mutex);
1624                 vq->acked_features = features;
1625                 mutex_unlock(&vq->mutex);
1626         }
1627         mutex_unlock(&vs->dev.mutex);
1628         return 0;
1629 }
1630
1631 static int vhost_scsi_open(struct inode *inode, struct file *f)
1632 {
1633         struct vhost_scsi *vs;
1634         struct vhost_virtqueue **vqs;
1635         int r = -ENOMEM, i;
1636
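        /*
         * Try a physically contiguous allocation first, quietly and with
         * retries, then fall back to vmalloc space if memory is too
         * fragmented for a large kzalloc().
         */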
1637         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1638         if (!vs) {
1639                 vs = vzalloc(sizeof(*vs));
1640                 if (!vs)
1641                         goto err_vs;
1642         }
1643
1644         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1645         if (!vqs)
1646                 goto err_vqs;
1647
1648         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1649         vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1650
1651         vs->vs_events_nr = 0;
1652         vs->vs_events_missed = false;
1653
1654         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1655         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1656         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1657         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1658         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1659                 vqs[i] = &vs->vqs[i].vq;
1660                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1661         }
1662         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1663
1664         tcm_vhost_init_inflight(vs, NULL);
1665
1666         f->private_data = vs;
1667         return 0;
1668
1669 err_vqs:
1670         kvfree(vs);
1671 err_vs:
1672         return r;
1673 }
1674
1675 static int vhost_scsi_release(struct inode *inode, struct file *f)
1676 {
1677         struct vhost_scsi *vs = f->private_data;
1678         struct vhost_scsi_target t;
1679
1680         mutex_lock(&vs->dev.mutex);
1681         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1682         mutex_unlock(&vs->dev.mutex);
1683         vhost_scsi_clear_endpoint(vs, &t);
1684         vhost_dev_stop(&vs->dev);
1685         vhost_dev_cleanup(&vs->dev, false);
1686         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1687         vhost_scsi_flush(vs);
1688         kfree(vs->dev.vqs);
1689         kvfree(vs);
1690         return 0;
1691 }
1692
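/*
 * Main ioctl dispatch: vhost-scsi specific commands are handled here;
 * anything unrecognized falls through to the generic vhost_dev_ioctl()
 * and, on -ENOIOCTLCMD, vhost_vring_ioctl() handlers.
 */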
1693 static long
1694 vhost_scsi_ioctl(struct file *f,
1695                  unsigned int ioctl,
1696                  unsigned long arg)
1697 {
1698         struct vhost_scsi *vs = f->private_data;
1699         struct vhost_scsi_target backend;
1700         void __user *argp = (void __user *)arg;
1701         u64 __user *featurep = argp;
1702         u32 __user *eventsp = argp;
1703         u32 events_missed;
1704         u64 features;
1705         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1706         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1707
1708         switch (ioctl) {
1709         case VHOST_SCSI_SET_ENDPOINT:
1710                 if (copy_from_user(&backend, argp, sizeof backend))
1711                         return -EFAULT;
1712                 if (backend.reserved != 0)
1713                         return -EOPNOTSUPP;
1714
1715                 return vhost_scsi_set_endpoint(vs, &backend);
1716         case VHOST_SCSI_CLEAR_ENDPOINT:
1717                 if (copy_from_user(&backend, argp, sizeof backend))
1718                         return -EFAULT;
1719                 if (backend.reserved != 0)
1720                         return -EOPNOTSUPP;
1721
1722                 return vhost_scsi_clear_endpoint(vs, &backend);
1723         case VHOST_SCSI_GET_ABI_VERSION:
1724                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1725                         return -EFAULT;
1726                 return 0;
1727         case VHOST_SCSI_SET_EVENTS_MISSED:
1728                 if (get_user(events_missed, eventsp))
1729                         return -EFAULT;
1730                 mutex_lock(&vq->mutex);
1731                 vs->vs_events_missed = events_missed;
1732                 mutex_unlock(&vq->mutex);
1733                 return 0;
1734         case VHOST_SCSI_GET_EVENTS_MISSED:
1735                 mutex_lock(&vq->mutex);
1736                 events_missed = vs->vs_events_missed;
1737                 mutex_unlock(&vq->mutex);
1738                 if (put_user(events_missed, eventsp))
1739                         return -EFAULT;
1740                 return 0;
1741         case VHOST_GET_FEATURES:
1742                 features = VHOST_SCSI_FEATURES;
1743                 if (copy_to_user(featurep, &features, sizeof features))
1744                         return -EFAULT;
1745                 return 0;
1746         case VHOST_SET_FEATURES:
1747                 if (copy_from_user(&features, featurep, sizeof features))
1748                         return -EFAULT;
1749                 return vhost_scsi_set_features(vs, features);
1750         default:
1751                 mutex_lock(&vs->dev.mutex);
1752                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1753                 /* TODO: flush backend after dev ioctl. */
1754                 if (r == -ENOIOCTLCMD)
1755                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1756                 mutex_unlock(&vs->dev.mutex);
1757                 return r;
1758         }
1759 }
1760
1761 #ifdef CONFIG_COMPAT
1762 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1763                                 unsigned long arg)
1764 {
1765         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1766 }
1767 #endif
1768
1769 static const struct file_operations vhost_scsi_fops = {
1770         .owner          = THIS_MODULE,
1771         .release        = vhost_scsi_release,
1772         .unlocked_ioctl = vhost_scsi_ioctl,
1773 #ifdef CONFIG_COMPAT
1774         .compat_ioctl   = vhost_scsi_compat_ioctl,
1775 #endif
1776         .open           = vhost_scsi_open,
1777         .llseek         = noop_llseek,
1778 };
1779
1780 static struct miscdevice vhost_scsi_misc = {
1781         MISC_DYNAMIC_MINOR,
1782         "vhost-scsi",
1783         &vhost_scsi_fops,
1784 };
1785
1786 static int __init vhost_scsi_register(void)
1787 {
1788         return misc_register(&vhost_scsi_misc);
1789 }
1790
1791 static int vhost_scsi_deregister(void)
1792 {
1793         return misc_deregister(&vhost_scsi_misc);
1794 }
1795
1796 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1797 {
1798         switch (tport->tport_proto_id) {
1799         case SCSI_PROTOCOL_SAS:
1800                 return "SAS";
1801         case SCSI_PROTOCOL_FCP:
1802                 return "FCP";
1803         case SCSI_PROTOCOL_ISCSI:
1804                 return "iSCSI";
1805         default:
1806                 break;
1807         }
1808
1809         return "Unknown";
1810 }
1811
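/*
 * Queue a VIRTIO_SCSI_T_TRANSPORT_RESET event toward the guest when a
 * LUN is hot-plugged or hot-unplugged, provided the guest negotiated
 * VIRTIO_SCSI_F_HOTPLUG.
 */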
1812 static void
1813 tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1814                   struct se_lun *lun, bool plug)
1815 {
1817         struct vhost_scsi *vs = tpg->vhost_scsi;
1818         struct vhost_virtqueue *vq;
1819         u32 reason;
1820
1821         if (!vs)
1822                 return;
1823
1824         mutex_lock(&vs->dev.mutex);
1825
1826         if (plug)
1827                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1828         else
1829                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1830
1831         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1832         mutex_lock(&vq->mutex);
1833         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1834                 tcm_vhost_send_evt(vs, tpg, lun,
1835                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1836         mutex_unlock(&vq->mutex);
1837         mutex_unlock(&vs->dev.mutex);
1838 }
1839
1840 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1841 {
1842         tcm_vhost_do_plug(tpg, lun, true);
1843 }
1844
1845 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1846 {
1847         tcm_vhost_do_plug(tpg, lun, false);
1848 }
1849
1850 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1851                                struct se_lun *lun)
1852 {
1853         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1854                                 struct tcm_vhost_tpg, se_tpg);
1855
1856         mutex_lock(&tcm_vhost_mutex);
1857
1858         mutex_lock(&tpg->tv_tpg_mutex);
1859         tpg->tv_tpg_port_count++;
1860         mutex_unlock(&tpg->tv_tpg_mutex);
1861
1862         tcm_vhost_hotplug(tpg, lun);
1863
1864         mutex_unlock(&tcm_vhost_mutex);
1865
1866         return 0;
1867 }
1868
1869 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1870                                   struct se_lun *lun)
1871 {
1872         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1873                                 struct tcm_vhost_tpg, se_tpg);
1874
1875         mutex_lock(&tcm_vhost_mutex);
1876
1877         mutex_lock(&tpg->tv_tpg_mutex);
1878         tpg->tv_tpg_port_count--;
1879         mutex_unlock(&tpg->tv_tpg_mutex);
1880
1881         tcm_vhost_hotunplug(tpg, lun);
1882
1883         mutex_unlock(&tcm_vhost_mutex);
1884 }
1885
1886 static struct se_node_acl *
1887 tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1888                        struct config_group *group,
1889                        const char *name)
1890 {
1891         struct se_node_acl *se_nacl, *se_nacl_new;
1892         struct tcm_vhost_nacl *nacl;
1893         u64 wwpn = 0;
1894         u32 nexus_depth;
1895
1896         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1897                 return ERR_PTR(-EINVAL); */
1898         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1899         if (!se_nacl_new)
1900                 return ERR_PTR(-ENOMEM);
1901
1902         nexus_depth = 1;
1903         /*
1904          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1905          * when converting a NodeACL from demo mode -> explicit
1906          */
1907         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1908                                 name, nexus_depth);
1909         if (IS_ERR(se_nacl)) {
1910                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1911                 return se_nacl;
1912         }
1913         /*
1914          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1915          */
1916         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1917         nacl->iport_wwpn = wwpn;
1918
1919         return se_nacl;
1920 }
1921
1922 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1923 {
1924         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1925                                 struct tcm_vhost_nacl, se_node_acl);
1926         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1927         kfree(nacl);
1928 }
1929
1930 static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1931                                        struct se_session *se_sess)
1932 {
1933         struct tcm_vhost_cmd *tv_cmd;
1934         unsigned int i;
1935
1936         if (!se_sess->sess_cmd_map)
1937                 return;
1938
1939         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1940                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1941
1942                 kfree(tv_cmd->tvc_sgl);
1943                 kfree(tv_cmd->tvc_prot_sgl);
1944                 kfree(tv_cmd->tvc_upages);
1945         }
1946 }
1947
1948 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1949                                 const char *name)
1950 {
1951         struct se_portal_group *se_tpg;
1952         struct se_session *se_sess;
1953         struct tcm_vhost_nexus *tv_nexus;
1954         struct tcm_vhost_cmd *tv_cmd;
1955         unsigned int i;
1956
1957         mutex_lock(&tpg->tv_tpg_mutex);
1958         if (tpg->tpg_nexus) {
1959                 mutex_unlock(&tpg->tv_tpg_mutex);
1960                 pr_debug("tpg->tpg_nexus already exists\n");
1961                 return -EEXIST;
1962         }
1963         se_tpg = &tpg->se_tpg;
1964
1965         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1966         if (!tv_nexus) {
1967                 mutex_unlock(&tpg->tv_tpg_mutex);
1968                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1969                 return -ENOMEM;
1970         }
1971         /*
1972          *  Initialize the struct se_session pointer and set up the tag pool
1973          *  for struct tcm_vhost_cmd descriptors
1974          */
1975         tv_nexus->tvn_se_sess = transport_init_session_tags(
1976                                         TCM_VHOST_DEFAULT_TAGS,
1977                                         sizeof(struct tcm_vhost_cmd),
1978                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1979         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1980                 mutex_unlock(&tpg->tv_tpg_mutex);
1981                 kfree(tv_nexus);
1982                 return -ENOMEM;
1983         }
1984         se_sess = tv_nexus->tvn_se_sess;
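        /*
         * Pre-allocate the per-command scatterlist, user page pointer and
         * protection scatterlist arrays up front, so the I/O fast path
         * never has to allocate them per request.
         */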
1985         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1986                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1987
1988                 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1989                                         TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
1990                 if (!tv_cmd->tvc_sgl) {
1991                         mutex_unlock(&tpg->tv_tpg_mutex);
1992                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1993                         goto out;
1994                 }
1995
1996                 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1997                                         TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
1998                 if (!tv_cmd->tvc_upages) {
1999                         mutex_unlock(&tpg->tv_tpg_mutex);
2000                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
2001                         goto out;
2002                 }
2003
2004                 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
2005                                         TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
2006                 if (!tv_cmd->tvc_prot_sgl) {
2007                         mutex_unlock(&tpg->tv_tpg_mutex);
2008                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
2009                         goto out;
2010                 }
2011         }
2012         /*
2013          * Since we are running in 'demo mode' this call will generate a
2014          * struct se_node_acl for the tcm_vhost struct se_portal_group with
2015          * the SCSI Initiator port name of the passed configfs group 'name'.
2016          */
2017         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
2018                                 se_tpg, (unsigned char *)name);
2019         if (!tv_nexus->tvn_se_sess->se_node_acl) {
2020                 mutex_unlock(&tpg->tv_tpg_mutex);
2021                 pr_debug("core_tpg_check_initiator_node_acl() failed"
2022                                 " for %s\n", name);
2023                 goto out;
2024         }
2025         /*
2026          * Now register the TCM vhost virtual I_T Nexus as active with the
2027          * call to __transport_register_session()
2028          */
2029         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
2030                         tv_nexus->tvn_se_sess, tv_nexus);
2031         tpg->tpg_nexus = tv_nexus;
2032
2033         mutex_unlock(&tpg->tv_tpg_mutex);
2034         return 0;
2035
2036 out:
2037         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
2038         transport_free_session(se_sess);
2039         kfree(tv_nexus);
2040         return -ENOMEM;
2041 }
2042
2043 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
2044 {
2045         struct se_session *se_sess;
2046         struct tcm_vhost_nexus *tv_nexus;
2047
2048         mutex_lock(&tpg->tv_tpg_mutex);
2049         tv_nexus = tpg->tpg_nexus;
2050         if (!tv_nexus) {
2051                 mutex_unlock(&tpg->tv_tpg_mutex);
2052                 return -ENODEV;
2053         }
2054
2055         se_sess = tv_nexus->tvn_se_sess;
2056         if (!se_sess) {
2057                 mutex_unlock(&tpg->tv_tpg_mutex);
2058                 return -ENODEV;
2059         }
2060
2061         if (tpg->tv_tpg_port_count != 0) {
2062                 mutex_unlock(&tpg->tv_tpg_mutex);
2063                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
2064                         " active TPG port count: %d\n",
2065                         tpg->tv_tpg_port_count);
2066                 return -EBUSY;
2067         }
2068
2069         if (tpg->tv_tpg_vhost_count != 0) {
2070                 mutex_unlock(&tpg->tv_tpg_mutex);
2071                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
2072                         " active TPG vhost count: %d\n",
2073                         tpg->tv_tpg_vhost_count);
2074                 return -EBUSY;
2075         }
2076
2077         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2078                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
2079                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2080
2081         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
2082         /*
2083          * Release the SCSI I_T Nexus to the emulated vhost Target Port
2084          */
2085         transport_deregister_session(tv_nexus->tvn_se_sess);
2086         tpg->tpg_nexus = NULL;
2087         mutex_unlock(&tpg->tv_tpg_mutex);
2088
2089         kfree(tv_nexus);
2090         return 0;
2091 }
2092
2093 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
2094                                         char *page)
2095 {
2096         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2097                                 struct tcm_vhost_tpg, se_tpg);
2098         struct tcm_vhost_nexus *tv_nexus;
2099         ssize_t ret;
2100
2101         mutex_lock(&tpg->tv_tpg_mutex);
2102         tv_nexus = tpg->tpg_nexus;
2103         if (!tv_nexus) {
2104                 mutex_unlock(&tpg->tv_tpg_mutex);
2105                 return -ENODEV;
2106         }
2107         ret = snprintf(page, PAGE_SIZE, "%s\n",
2108                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2109         mutex_unlock(&tpg->tv_tpg_mutex);
2110
2111         return ret;
2112 }
2113
2114 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
2115                                          const char *page,
2116                                          size_t count)
2117 {
2118         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2119                                 struct tcm_vhost_tpg, se_tpg);
2120         struct tcm_vhost_tport *tport_wwn = tpg->tport;
2121         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
2122         int ret;
2123         /*
2124          * Shut down the active I_T nexus if 'NULL' is passed.
2125          */
2126         if (!strncmp(page, "NULL", 4)) {
2127                 ret = tcm_vhost_drop_nexus(tpg);
2128                 return (!ret) ? count : ret;
2129         }
2130         /*
2131          * Otherwise make sure the passed virtual Initiator port WWN matches
2132          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
2133          * tcm_vhost_make_nexus().
2134          */
2135         if (strlen(page) >= TCM_VHOST_NAMELEN) {
2136                 pr_err("Emulated NAA SAS Address: %s, exceeds"
2137                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
2138                 return -EINVAL;
2139         }
2140         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
2141
2142         ptr = strstr(i_port, "naa.");
2143         if (ptr) {
2144                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2145                         pr_err("Passed SAS Initiator Port %s does not"
2146                                 " match target port protoid: %s\n", i_port,
2147                                 tcm_vhost_dump_proto_id(tport_wwn));
2148                         return -EINVAL;
2149                 }
2150                 port_ptr = &i_port[0];
2151                 goto check_newline;
2152         }
2153         ptr = strstr(i_port, "fc.");
2154         if (ptr) {
2155                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2156                         pr_err("Passed FCP Initiator Port %s does not"
2157                                 " match target port protoid: %s\n", i_port,
2158                                 tcm_vhost_dump_proto_id(tport_wwn));
2159                         return -EINVAL;
2160                 }
2161                 port_ptr = &i_port[3]; /* Skip over "fc." */
2162                 goto check_newline;
2163         }
2164         ptr = strstr(i_port, "iqn.");
2165         if (ptr) {
2166                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2167                         pr_err("Passed iSCSI Initiator Port %s does not"
2168                                 " match target port protoid: %s\n", i_port,
2169                                 tcm_vhost_dump_proto_id(tport_wwn));
2170                         return -EINVAL;
2171                 }
2172                 port_ptr = &i_port[0];
2173                 goto check_newline;
2174         }
2175         pr_err("Unable to locate prefix for emulated Initiator Port:"
2176                         " %s\n", i_port);
2177         return -EINVAL;
2178         /*
2179          * Clear any trailing newline for the NAA WWN
2180          */
2181 check_newline:
2182         if (i_port[strlen(i_port)-1] == '\n')
2183                 i_port[strlen(i_port)-1] = '\0';
2184
2185         ret = tcm_vhost_make_nexus(tpg, port_ptr);
2186         if (ret < 0)
2187                 return ret;
2188
2189         return count;
2190 }
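/*
 * Example (illustrative paths and WWNs): the nexus attribute above is
 * normally written by targetcli, but by hand the equivalent is:
 *
 *      echo -n naa.5001405xxxxxxxxx > \
 *          /sys/kernel/config/target/vhost/naa.5001405yyyyyyyyy/tpgt_1/nexus
 */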
2191
2192 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
2193
2194 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
2195         &tcm_vhost_tpg_nexus.attr,
2196         NULL,
2197 };
2198
2199 static struct se_portal_group *
2200 tcm_vhost_make_tpg(struct se_wwn *wwn,
2201                    struct config_group *group,
2202                    const char *name)
2203 {
2204         struct tcm_vhost_tport *tport = container_of(wwn,
2205                         struct tcm_vhost_tport, tport_wwn);
2206
2207         struct tcm_vhost_tpg *tpg;
2208         unsigned long tpgt;
2209         int ret;
2210
2211         if (strstr(name, "tpgt_") != name)
2212                 return ERR_PTR(-EINVAL);
2213         if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2214                 return ERR_PTR(-EINVAL);
2215
2216         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
2217         if (!tpg) {
2218                 pr_err("Unable to allocate struct tcm_vhost_tpg\n");
2219                 return ERR_PTR(-ENOMEM);
2220         }
2221         mutex_init(&tpg->tv_tpg_mutex);
2222         INIT_LIST_HEAD(&tpg->tv_tpg_list);
2223         tpg->tport = tport;
2224         tpg->tport_tpgt = tpgt;
2225
2226         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
2227                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
2228         if (ret < 0) {
2229                 kfree(tpg);
2230                 return NULL;
2231         }
2232         mutex_lock(&tcm_vhost_mutex);
2233         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
2234         mutex_unlock(&tcm_vhost_mutex);
2235
2236         return &tpg->se_tpg;
2237 }
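/*
 * tcm_vhost_make_tpg() runs when userspace creates a TPG directory under
 * an existing vhost tport, e.g. (path illustrative):
 *
 *      mkdir /sys/kernel/config/target/vhost/naa.5001405yyyyyyyyy/tpgt_1
 */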
2238
2239 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
2240 {
2241         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2242                                 struct tcm_vhost_tpg, se_tpg);
2243
2244         mutex_lock(&tcm_vhost_mutex);
2245         list_del(&tpg->tv_tpg_list);
2246         mutex_unlock(&tcm_vhost_mutex);
2247         /*
2248          * Release the virtual I_T Nexus for this vhost TPG
2249          */
2250         tcm_vhost_drop_nexus(tpg);
2251         /*
2252          * Deregister the se_tpg from TCM..
2253          */
2254         core_tpg_deregister(se_tpg);
2255         kfree(tpg);
2256 }
2257
2258 static struct se_wwn *
2259 tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2260                      struct config_group *group,
2261                      const char *name)
2262 {
2263         struct tcm_vhost_tport *tport;
2264         char *ptr;
2265         u64 wwpn = 0;
2266         int off = 0;
2267
2268         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
2269                 return ERR_PTR(-EINVAL); */
2270
2271         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
2272         if (!tport) {
2273                 pr_err("Unable to allocate struct tcm_vhost_tport\n");
2274                 return ERR_PTR(-ENOMEM);
2275         }
2276         tport->tport_wwpn = wwpn;
2277         /*
2278          * Determine the emulated Protocol Identifier and Target Port Name
2279          * based on the incoming configfs directory name.
2280          */
2281         ptr = strstr(name, "naa.");
2282         if (ptr) {
2283                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2284                 goto check_len;
2285         }
2286         ptr = strstr(name, "fc.");
2287         if (ptr) {
2288                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2289                 off = 3; /* Skip over "fc." */
2290                 goto check_len;
2291         }
2292         ptr = strstr(name, "iqn.");
2293         if (ptr) {
2294                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2295                 goto check_len;
2296         }
2297
2298         pr_err("Unable to locate prefix for emulated Target Port:"
2299                         " %s\n", name);
2300         kfree(tport);
2301         return ERR_PTR(-EINVAL);
2302
2303 check_len:
2304         if (strlen(name) >= TCM_VHOST_NAMELEN) {
2305                 pr_err("Emulated %s Address: %s, exceeds"
2306                         " max: %d\n", tcm_vhost_dump_proto_id(tport), name,
2307                         TCM_VHOST_NAMELEN);
2308                 kfree(tport);
2309                 return ERR_PTR(-EINVAL);
2310         }
2311         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
2312
2313         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2314                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
2315
2316         return &tport->tport_wwn;
2317 }
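/*
 * tcm_vhost_make_tport() runs when userspace creates the WWN directory
 * itself, e.g. (path illustrative):
 *
 *      mkdir /sys/kernel/config/target/vhost/naa.5001405yyyyyyyyy
 */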
2318
2319 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
2320 {
2321         struct tcm_vhost_tport *tport = container_of(wwn,
2322                                 struct tcm_vhost_tport, tport_wwn);
2323
2324         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2325                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2326                 tport->tport_name);
2327
2328         kfree(tport);
2329 }
2330
2331 static ssize_t
2332 tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2333                                 char *page)
2334 {
2335         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2336                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2337                 utsname()->machine);
2338 }
2339
2340 TF_WWN_ATTR_RO(tcm_vhost, version);
2341
2342 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2343         &tcm_vhost_wwn_version.attr,
2344         NULL,
2345 };
2346
2347 static struct target_core_fabric_ops tcm_vhost_ops = {
2348         .get_fabric_name                = tcm_vhost_get_fabric_name,
2349         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
2350         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
2351         .tpg_get_tag                    = tcm_vhost_get_tag,
2352         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2353         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2354         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2355         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2356         .tpg_check_demo_mode            = tcm_vhost_check_true,
2357         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2358         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2359         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2360         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2361         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2362         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2363         .release_cmd                    = tcm_vhost_release_cmd,
2364         .check_stop_free                = vhost_scsi_check_stop_free,
2365         .shutdown_session               = tcm_vhost_shutdown_session,
2366         .close_session                  = tcm_vhost_close_session,
2367         .sess_get_index                 = tcm_vhost_sess_get_index,
2368         .sess_get_initiator_sid         = NULL,
2369         .write_pending                  = tcm_vhost_write_pending,
2370         .write_pending_status           = tcm_vhost_write_pending_status,
2371         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2372         .get_task_tag                   = tcm_vhost_get_task_tag,
2373         .get_cmd_state                  = tcm_vhost_get_cmd_state,
2374         .queue_data_in                  = tcm_vhost_queue_data_in,
2375         .queue_status                   = tcm_vhost_queue_status,
2376         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2377         .aborted_task                   = tcm_vhost_aborted_task,
2378         /*
2379          * Setup callers for generic logic in target_core_fabric_configfs.c
2380          */
2381         .fabric_make_wwn                = tcm_vhost_make_tport,
2382         .fabric_drop_wwn                = tcm_vhost_drop_tport,
2383         .fabric_make_tpg                = tcm_vhost_make_tpg,
2384         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2385         .fabric_post_link               = tcm_vhost_port_link,
2386         .fabric_pre_unlink              = tcm_vhost_port_unlink,
2387         .fabric_make_np                 = NULL,
2388         .fabric_drop_np                 = NULL,
2389         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2390         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2391 };
2392
2393 static int tcm_vhost_register_configfs(void)
2394 {
2395         struct target_fabric_configfs *fabric;
2396         int ret;
2397
2398         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2399                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2400                 utsname()->machine);
2401         /*
2402          * Register the top level struct config_item_type with TCM core
2403          */
2404         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2405         if (IS_ERR(fabric)) {
2406                 pr_err("target_fabric_configfs_init() failed\n");
2407                 return PTR_ERR(fabric);
2408         }
2409         /*
2410          * Setup fabric->tf_ops from our local tcm_vhost_ops
2411          */
2412         fabric->tf_ops = tcm_vhost_ops;
2413         /*
2414          * Setup default attribute lists for various fabric->tf_cit_tmpl
2415          */
2416         fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2417         fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2418         fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2419         fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2420         fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2421         fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2422         fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2423         fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2424         fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2425         /*
2426          * Register the fabric for use within TCM
2427          */
2428         ret = target_fabric_configfs_register(fabric);
2429         if (ret < 0) {
2430                 pr_err("target_fabric_configfs_register() failed"
2431                                 " for TCM_VHOST\n");
2432                 return ret;
2433         }
2434         /*
2435          * Setup our local pointer to *fabric
2436          */
2437         tcm_vhost_fabric_configfs = fabric;
2438         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2439         return 0;
2440 }
2441
2442 static void tcm_vhost_deregister_configfs(void)
2443 {
2444         if (!tcm_vhost_fabric_configfs)
2445                 return;
2446
2447         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2448         tcm_vhost_fabric_configfs = NULL;
2449         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2450 }
2451
2452 static int __init tcm_vhost_init(void)
2453 {
2454         int ret = -ENOMEM;
2455         /*
2456          * Use our own dedicated workqueue for submitting I/O into
2457          * target core to avoid contention within system_wq.
2458          */
2459         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2460         if (!tcm_vhost_workqueue)
2461                 goto out;
2462
2463         ret = vhost_scsi_register();
2464         if (ret < 0)
2465                 goto out_destroy_workqueue;
2466
2467         ret = tcm_vhost_register_configfs();
2468         if (ret < 0)
2469                 goto out_vhost_scsi_deregister;
2470
2471         return 0;
2472
2473 out_vhost_scsi_deregister:
2474         vhost_scsi_deregister();
2475 out_destroy_workqueue:
2476         destroy_workqueue(tcm_vhost_workqueue);
2477 out:
2478         return ret;
2479 }
2480
2481 static void tcm_vhost_exit(void)
2482 {
2483         tcm_vhost_deregister_configfs();
2484         vhost_scsi_deregister();
2485         destroy_workqueue(tcm_vhost_workqueue);
2486 }
2487
2488 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2489 MODULE_ALIAS("tcm_vhost");
2490 MODULE_LICENSE("GPL");
2491 module_init(tcm_vhost_init);
2492 module_exit(tcm_vhost_exit);