1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2012 RisingTide Systems LLC.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_tcq.h>
42 #include <target/target_core_base.h>
43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_configfs.h>
45 #include <target/target_core_configfs.h>
46 #include <target/configfs_macros.h>
47 #include <linux/vhost.h>
48 #include <linux/virtio_scsi.h>
49 #include <linux/llist.h>
50 #include <linux/bitmap.h>
51
52 #include "vhost.c"
53 #include "vhost.h"
54
55 #define TCM_VHOST_VERSION  "v0.1"
56 #define TCM_VHOST_NAMELEN 256
57 #define TCM_VHOST_MAX_CDB_SIZE 32
58
59 struct vhost_scsi_inflight {
60         /* Wait for the flush operation to finish */
61         struct completion comp;
62         /* Refcount for the inflight reqs */
63         struct kref kref;
64 };
65
66 struct tcm_vhost_cmd {
67         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
68         int tvc_vq_desc;
69         /* virtio-scsi initiator task attribute */
70         int tvc_task_attr;
71         /* virtio-scsi initiator data direction */
72         enum dma_data_direction tvc_data_direction;
73         /* Expected data transfer length from virtio-scsi header */
74         u32 tvc_exp_data_len;
75         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
76         u64 tvc_tag;
77         /* The number of scatterlists associated with this cmd */
78         u32 tvc_sgl_count;
79         /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
80         u32 tvc_lun;
81         /* Pointer to the SGL formatted memory from virtio-scsi */
82         struct scatterlist *tvc_sgl;
83         /* Pointer to response */
84         struct virtio_scsi_cmd_resp __user *tvc_resp;
85         /* Pointer to vhost_scsi for our device */
86         struct vhost_scsi *tvc_vhost;
87         /* Pointer to vhost_virtqueue for the cmd */
88         struct vhost_virtqueue *tvc_vq;
89         /* Pointer to vhost nexus memory */
90         struct tcm_vhost_nexus *tvc_nexus;
91         /* The TCM I/O descriptor that is accessed via container_of() */
92         struct se_cmd tvc_se_cmd;
93         /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
94         struct work_struct work;
95         /* Copy of the incoming SCSI command descriptor block (CDB) */
96         unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
97         /* Sense buffer that will be mapped into outgoing status */
98         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
99         /* Completed commands list, serviced from vhost worker thread */
100         struct llist_node tvc_completion_list;
101         /* Used to track inflight cmd */
102         struct vhost_scsi_inflight *inflight;
103 };
104
105 struct tcm_vhost_nexus {
106         /* Pointer to TCM session for I_T Nexus */
107         struct se_session *tvn_se_sess;
108 };
109
110 struct tcm_vhost_nacl {
111         /* Binary World Wide unique Port Name for Vhost Initiator port */
112         u64 iport_wwpn;
113         /* ASCII formatted WWPN for SAS Initiator port */
114         char iport_name[TCM_VHOST_NAMELEN];
115         /* Returned by tcm_vhost_make_nodeacl() */
116         struct se_node_acl se_node_acl;
117 };
118
119 struct vhost_scsi;
120 struct tcm_vhost_tpg {
121         /* Vhost port target portal group tag for TCM */
122         u16 tport_tpgt;
123         /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
124         int tv_tpg_port_count;
125         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
126         int tv_tpg_vhost_count;
127         /* list for tcm_vhost_list */
128         struct list_head tv_tpg_list;
129         /* Used to protect access for tpg_nexus */
130         struct mutex tv_tpg_mutex;
131         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
132         struct tcm_vhost_nexus *tpg_nexus;
133         /* Pointer back to tcm_vhost_tport */
134         struct tcm_vhost_tport *tport;
135         /* Returned by tcm_vhost_make_tpg() */
136         struct se_portal_group se_tpg;
137         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
138         struct vhost_scsi *vhost_scsi;
139 };
140
141 struct tcm_vhost_tport {
142         /* SCSI protocol the tport is providing */
143         u8 tport_proto_id;
144         /* Binary World Wide unique Port Name for Vhost Target port */
145         u64 tport_wwpn;
146         /* ASCII formatted WWPN for Vhost Target port */
147         char tport_name[TCM_VHOST_NAMELEN];
148         /* Returned by tcm_vhost_make_tport() */
149         struct se_wwn tport_wwn;
150 };
151
152 struct tcm_vhost_evt {
153         /* event to be sent to guest */
154         struct virtio_scsi_event event;
155         /* event list, serviced from vhost worker thread */
156         struct llist_node list;
157 };
158
159 enum {
160         VHOST_SCSI_VQ_CTL = 0,
161         VHOST_SCSI_VQ_EVT = 1,
162         VHOST_SCSI_VQ_IO = 2,
163 };
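/*
 * Editor's note on the virtqueue indices above: the layout mirrors the
 * virtio-scsi device model, with vq 0 as the control queue, vq 1 as the
 * event queue, and vqs 2..VHOST_SCSI_MAX_VQ-1 as request (I/O) queues;
 * VHOST_SCSI_VQ_IO is only the index of the first request queue.
 */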
164
165 enum {
166         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
167 };
168
169 #define VHOST_SCSI_MAX_TARGET   256
170 #define VHOST_SCSI_MAX_VQ       128
171 #define VHOST_SCSI_MAX_EVENT    128
172
173 struct vhost_scsi_virtqueue {
174         struct vhost_virtqueue vq;
175         /*
176          * Reference counting for inflight reqs, used for the flush operation.
177          * At any given time one counter tracks newly submitted commands, while
178          * we wait for the other one to drop to zero.
179          */
180         struct vhost_scsi_inflight inflights[2];
181         /*
182          * Indicate current inflight in use, protected by vq->mutex.
183          * Writers must also take dev mutex and flush under it.
184          */
185         int inflight_idx;
186 };
187
188 struct vhost_scsi {
189         /* Protected by vhost_scsi->dev.mutex */
190         struct tcm_vhost_tpg **vs_tpg;
191         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
192
193         struct vhost_dev dev;
194         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
195
196         struct vhost_work vs_completion_work; /* cmd completion work item */
197         struct llist_head vs_completion_list; /* cmd completion queue */
198
199         struct vhost_work vs_event_work; /* evt injection work item */
200         struct llist_head vs_event_list; /* evt injection queue */
201
202         bool vs_events_missed; /* any missed events, protected by vq->mutex */
203         int vs_events_nr; /* num of pending events, protected by vq->mutex */
204 };
205
206 /* Local pointer to allocated TCM configfs fabric module */
207 static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
208
209 static struct workqueue_struct *tcm_vhost_workqueue;
210
211 /* Global mutex to protect tcm_vhost TPG list for vhost IOCTL access */
212 static DEFINE_MUTEX(tcm_vhost_mutex);
213 static LIST_HEAD(tcm_vhost_list);
214
215 static int iov_num_pages(struct iovec *iov)
216 {
217         return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
218                ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
219 }
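/*
 * Editor's worked example, assuming 4 KiB pages: an iovec with
 * iov_base = 0x1ff8 and iov_len = 16 straddles one page boundary, so
 * PAGE_ALIGN(0x2008) - (0x1ff8 & PAGE_MASK) = 0x3000 - 0x1000 = 0x2000,
 * i.e. 2 pages after the shift by PAGE_SHIFT.
 */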
220
221 static void tcm_vhost_done_inflight(struct kref *kref)
222 {
223         struct vhost_scsi_inflight *inflight;
224
225         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
226         complete(&inflight->comp);
227 }
228
229 static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
230                                     struct vhost_scsi_inflight *old_inflight[])
231 {
232         struct vhost_scsi_inflight *new_inflight;
233         struct vhost_virtqueue *vq;
234         int idx, i;
235
236         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
237                 vq = &vs->vqs[i].vq;
238
239                 mutex_lock(&vq->mutex);
240
241                 /* store old inflight */
242                 idx = vs->vqs[i].inflight_idx;
243                 if (old_inflight)
244                         old_inflight[i] = &vs->vqs[i].inflights[idx];
245
246                 /* set up new inflight */
247                 vs->vqs[i].inflight_idx = idx ^ 1;
248                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
249                 kref_init(&new_inflight->kref);
250                 init_completion(&new_inflight->comp);
251
252                 mutex_unlock(&vq->mutex);
253         }
254 }
255
256 static struct vhost_scsi_inflight *
257 tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
258 {
259         struct vhost_scsi_inflight *inflight;
260         struct vhost_scsi_virtqueue *svq;
261
262         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
263         inflight = &svq->inflights[svq->inflight_idx];
264         kref_get(&inflight->kref);
265
266         return inflight;
267 }
268
269 static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
270 {
271         kref_put(&inflight->kref, tcm_vhost_done_inflight);
272 }
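/*
 * Editor's summary of the flush protocol: each virtqueue keeps two inflight
 * counters, and inflight_idx selects the current one.  Every command takes a
 * kref on the current counter via tcm_vhost_get_inflight() and drops it in
 * vhost_scsi_free_cmd() via tcm_vhost_put_inflight().  vhost_scsi_flush()
 * flips inflight_idx through tcm_vhost_init_inflight(), drops the initial
 * reference on the old counter, and waits on its completion, which fires
 * only once every command issued before the flip has finished.
 */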
273
274 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
275 {
276         return 1;
277 }
278
279 static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
280 {
281         return 0;
282 }
283
284 static char *tcm_vhost_get_fabric_name(void)
285 {
286         return "vhost";
287 }
288
289 static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
290 {
291         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
292                                 struct tcm_vhost_tpg, se_tpg);
293         struct tcm_vhost_tport *tport = tpg->tport;
294
295         switch (tport->tport_proto_id) {
296         case SCSI_PROTOCOL_SAS:
297                 return sas_get_fabric_proto_ident(se_tpg);
298         case SCSI_PROTOCOL_FCP:
299                 return fc_get_fabric_proto_ident(se_tpg);
300         case SCSI_PROTOCOL_ISCSI:
301                 return iscsi_get_fabric_proto_ident(se_tpg);
302         default:
303                 pr_err("Unknown tport_proto_id: 0x%02x, using"
304                         " SAS emulation\n", tport->tport_proto_id);
305                 break;
306         }
307
308         return sas_get_fabric_proto_ident(se_tpg);
309 }
310
311 static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
312 {
313         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
314                                 struct tcm_vhost_tpg, se_tpg);
315         struct tcm_vhost_tport *tport = tpg->tport;
316
317         return &tport->tport_name[0];
318 }
319
320 static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
321 {
322         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
323                                 struct tcm_vhost_tpg, se_tpg);
324         return tpg->tport_tpgt;
325 }
326
327 static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
328 {
329         return 1;
330 }
331
332 static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
333         struct se_node_acl *se_nacl,
334         struct t10_pr_registration *pr_reg,
335         int *format_code,
336         unsigned char *buf)
337 {
338         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
339                                 struct tcm_vhost_tpg, se_tpg);
340         struct tcm_vhost_tport *tport = tpg->tport;
341
342         switch (tport->tport_proto_id) {
343         case SCSI_PROTOCOL_SAS:
344                 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
345                                         format_code, buf);
346         case SCSI_PROTOCOL_FCP:
347                 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
348                                         format_code, buf);
349         case SCSI_PROTOCOL_ISCSI:
350                 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
351                                         format_code, buf);
352         default:
353                 pr_err("Unknown tport_proto_id: 0x%02x, using"
354                         " SAS emulation\n", tport->tport_proto_id);
355                 break;
356         }
357
358         return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
359                         format_code, buf);
360 }
361
362 static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
363         struct se_node_acl *se_nacl,
364         struct t10_pr_registration *pr_reg,
365         int *format_code)
366 {
367         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
368                                 struct tcm_vhost_tpg, se_tpg);
369         struct tcm_vhost_tport *tport = tpg->tport;
370
371         switch (tport->tport_proto_id) {
372         case SCSI_PROTOCOL_SAS:
373                 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
374                                         format_code);
375         case SCSI_PROTOCOL_FCP:
376                 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
377                                         format_code);
378         case SCSI_PROTOCOL_ISCSI:
379                 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
380                                         format_code);
381         default:
382                 pr_err("Unknown tport_proto_id: 0x%02x, using"
383                         " SAS emulation\n", tport->tport_proto_id);
384                 break;
385         }
386
387         return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
388                         format_code);
389 }
390
391 static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
392         const char *buf,
393         u32 *out_tid_len,
394         char **port_nexus_ptr)
395 {
396         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
397                                 struct tcm_vhost_tpg, se_tpg);
398         struct tcm_vhost_tport *tport = tpg->tport;
399
400         switch (tport->tport_proto_id) {
401         case SCSI_PROTOCOL_SAS:
402                 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
403                                         port_nexus_ptr);
404         case SCSI_PROTOCOL_FCP:
405                 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
406                                         port_nexus_ptr);
407         case SCSI_PROTOCOL_ISCSI:
408                 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
409                                         port_nexus_ptr);
410         default:
411                 pr_err("Unknown tport_proto_id: 0x%02x, using"
412                         " SAS emulation\n", tport->tport_proto_id);
413                 break;
414         }
415
416         return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
417                         port_nexus_ptr);
418 }
419
420 static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
421         struct se_portal_group *se_tpg)
422 {
423         struct tcm_vhost_nacl *nacl;
424
425         nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
426         if (!nacl) {
427                 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
428                 return NULL;
429         }
430
431         return &nacl->se_node_acl;
432 }
433
434 static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
435         struct se_node_acl *se_nacl)
436 {
437         struct tcm_vhost_nacl *nacl = container_of(se_nacl,
438                         struct tcm_vhost_nacl, se_node_acl);
439         kfree(nacl);
440 }
441
442 static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
443 {
444         return 1;
445 }
446
447 static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
448 {
449         return;
450 }
451
452 static int tcm_vhost_shutdown_session(struct se_session *se_sess)
453 {
454         return 0;
455 }
456
457 static void tcm_vhost_close_session(struct se_session *se_sess)
458 {
459         return;
460 }
461
462 static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
463 {
464         return 0;
465 }
466
467 static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
468 {
469         /* Go ahead and process the write immediately */
470         target_execute_cmd(se_cmd);
471         return 0;
472 }
473
474 static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
475 {
476         return 0;
477 }
478
479 static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
480 {
481         return;
482 }
483
484 static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
485 {
486         return 0;
487 }
488
489 static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
490 {
491         return 0;
492 }
493
494 static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
495 {
496         struct vhost_scsi *vs = tv_cmd->tvc_vhost;
497
498         llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
499
500         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
501 }
502
503 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
504 {
505         struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
506                                 struct tcm_vhost_cmd, tvc_se_cmd);
507         vhost_scsi_complete_cmd(tv_cmd);
508         return 0;
509 }
510
511 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
512 {
513         struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
514                                 struct tcm_vhost_cmd, tvc_se_cmd);
515         vhost_scsi_complete_cmd(tv_cmd);
516         return 0;
517 }
518
519 static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
520 {
521         return 0;
522 }
523
524 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
525 {
526         vs->vs_events_nr--;
527         kfree(evt);
528 }
529
530 static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
531         u32 event, u32 reason)
532 {
533         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
534         struct tcm_vhost_evt *evt;
535
536         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
537                 vs->vs_events_missed = true;
538                 return NULL;
539         }
540
541         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
542         if (!evt) {
543                 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
544                 vs->vs_events_missed = true;
545                 return NULL;
546         }
547
548         evt->event.event = event;
549         evt->event.reason = reason;
550         vs->vs_events_nr++;
551
552         return evt;
553 }
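/*
 * Editor's note: tcm_vhost_allocate_evt() caps the number of queued events
 * at roughly VHOST_SCSI_MAX_EVENT; when the cap is hit (or allocation
 * fails) it only sets vs_events_missed, and tcm_vhost_do_evt_work() later
 * reports this to the guest by OR-ing VIRTIO_SCSI_T_EVENTS_MISSED into the
 * next event it does deliver.
 */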
554
555 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
556 {
557         struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
558
559         /* TODO locking against target/backend threads? */
560         transport_generic_free_cmd(se_cmd, 1);
561
562         if (tv_cmd->tvc_sgl_count) {
563                 u32 i;
564                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
565                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
566
567                 kfree(tv_cmd->tvc_sgl);
568         }
569
570         tcm_vhost_put_inflight(tv_cmd->inflight);
571
572         kfree(tv_cmd);
573 }
574
575 static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
576         struct tcm_vhost_evt *evt)
577 {
578         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
579         struct virtio_scsi_event *event = &evt->event;
580         struct virtio_scsi_event __user *eventp;
581         unsigned out, in;
582         int head, ret;
583
584         if (!vq->private_data) {
585                 vs->vs_events_missed = true;
586                 return;
587         }
588
589 again:
590         vhost_disable_notify(&vs->dev, vq);
591         head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
592                         ARRAY_SIZE(vq->iov), &out, &in,
593                         NULL, NULL);
594         if (head < 0) {
595                 vs->vs_events_missed = true;
596                 return;
597         }
598         if (head == vq->num) {
599                 if (vhost_enable_notify(&vs->dev, vq))
600                         goto again;
601                 vs->vs_events_missed = true;
602                 return;
603         }
604
605         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
606                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
607                                 vq->iov[out].iov_len);
608                 vs->vs_events_missed = true;
609                 return;
610         }
611
612         if (vs->vs_events_missed) {
613                 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
614                 vs->vs_events_missed = false;
615         }
616
617         eventp = vq->iov[out].iov_base;
618         ret = __copy_to_user(eventp, event, sizeof(*event));
619         if (!ret)
620                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
621         else
622                 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
623 }
624
625 static void tcm_vhost_evt_work(struct vhost_work *work)
626 {
627         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
628                                         vs_event_work);
629         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
630         struct tcm_vhost_evt *evt;
631         struct llist_node *llnode;
632
633         mutex_lock(&vq->mutex);
634         llnode = llist_del_all(&vs->vs_event_list);
635         while (llnode) {
636                 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
637                 llnode = llist_next(llnode);
638                 tcm_vhost_do_evt_work(vs, evt);
639                 tcm_vhost_free_evt(vs, evt);
640         }
641         mutex_unlock(&vq->mutex);
642 }
643
644 /* Fill in status and signal that we are done processing this command
645  *
646  * This is scheduled in the vhost work queue so we are called with the owner
647  * process mm and can access the vring.
648  */
649 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
650 {
651         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
652                                         vs_completion_work);
653         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
654         struct virtio_scsi_cmd_resp v_rsp;
655         struct tcm_vhost_cmd *tv_cmd;
656         struct llist_node *llnode;
657         struct se_cmd *se_cmd;
658         int ret, vq;
659
660         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
661         llnode = llist_del_all(&vs->vs_completion_list);
662         while (llnode) {
663                 tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
664                                      tvc_completion_list);
665                 llnode = llist_next(llnode);
666                 se_cmd = &tv_cmd->tvc_se_cmd;
667
668                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
669                         tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
670
671                 memset(&v_rsp, 0, sizeof(v_rsp));
672                 v_rsp.resid = se_cmd->residual_count;
673                 /* TODO is status_qualifier field needed? */
674                 v_rsp.status = se_cmd->scsi_status;
675                 v_rsp.sense_len = se_cmd->scsi_sense_length;
676                 memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
677                        v_rsp.sense_len);
678                 ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
679                 if (likely(ret == 0)) {
680                         struct vhost_scsi_virtqueue *q;
681                         vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
682                         q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
683                         vq = q - vs->vqs;
684                         __set_bit(vq, signal);
685                 } else
686                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
687
688                 vhost_scsi_free_cmd(tv_cmd);
689         }
690
691         vq = -1;
692         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
693                 < VHOST_SCSI_MAX_VQ)
694                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
695 }
696
697 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
698         struct vhost_virtqueue *vq,
699         struct tcm_vhost_tpg *tv_tpg,
700         struct virtio_scsi_cmd_req *v_req,
701         u32 exp_data_len,
702         int data_direction)
703 {
704         struct tcm_vhost_cmd *tv_cmd;
705         struct tcm_vhost_nexus *tv_nexus;
706
707         tv_nexus = tv_tpg->tpg_nexus;
708         if (!tv_nexus) {
709                 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
710                 return ERR_PTR(-EIO);
711         }
712
713         tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
714         if (!tv_cmd) {
715                 pr_err("Unable to allocate struct tcm_vhost_cmd\n");
716                 return ERR_PTR(-ENOMEM);
717         }
718         tv_cmd->tvc_tag = v_req->tag;
719         tv_cmd->tvc_task_attr = v_req->task_attr;
720         tv_cmd->tvc_exp_data_len = exp_data_len;
721         tv_cmd->tvc_data_direction = data_direction;
722         tv_cmd->tvc_nexus = tv_nexus;
723         tv_cmd->inflight = tcm_vhost_get_inflight(vq);
724
725         return tv_cmd;
726 }
727
728 /*
729  * Map a user memory range into a scatterlist
730  *
731  * Returns the number of scatterlist entries used or -errno on error.
732  */
733 static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
734         unsigned int sgl_count, struct iovec *iov, int write)
735 {
736         unsigned int npages = 0, pages_nr, offset, nbytes;
737         struct scatterlist *sg = sgl;
738         void __user *ptr = iov->iov_base;
739         size_t len = iov->iov_len;
740         struct page **pages;
741         int ret, i;
742
743         pages_nr = iov_num_pages(iov);
744         if (pages_nr > sgl_count)
745                 return -ENOBUFS;
746
747         pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
748         if (!pages)
749                 return -ENOMEM;
750
751         ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
752         /* No pages were pinned */
753         if (ret < 0)
754                 goto out;
755         /* Fewer pages pinned than requested */
756         if (ret != pages_nr) {
757                 for (i = 0; i < ret; i++)
758                         put_page(pages[i]);
759                 ret = -EFAULT;
760                 goto out;
761         }
762
763         while (len > 0) {
764                 offset = (uintptr_t)ptr & ~PAGE_MASK;
765                 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
766                 sg_set_page(sg, pages[npages], nbytes, offset);
767                 ptr += nbytes;
768                 len -= nbytes;
769                 sg++;
770                 npages++;
771         }
772
773 out:
774         kfree(pages);
775         return ret;
776 }
777
778 static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
779         struct iovec *iov, unsigned int niov, int write)
780 {
781         int ret;
782         unsigned int i;
783         u32 sgl_count;
784         struct scatterlist *sg;
785
786         /*
787          * Find out how long sglist needs to be
788          */
789         sgl_count = 0;
790         for (i = 0; i < niov; i++)
791                 sgl_count += iov_num_pages(&iov[i]);
792
793         /* TODO overflow checking */
794
795         sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
796         if (!sg)
797                 return -ENOMEM;
798         pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
799                sg, sgl_count, !sg);
800         sg_init_table(sg, sgl_count);
801
802         tv_cmd->tvc_sgl = sg;
803         tv_cmd->tvc_sgl_count = sgl_count;
804
805         pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
806         for (i = 0; i < niov; i++) {
807                 ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
808                 if (ret < 0) {
809                         for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
810                                 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
811                         kfree(tv_cmd->tvc_sgl);
812                         tv_cmd->tvc_sgl = NULL;
813                         tv_cmd->tvc_sgl_count = 0;
814                         return ret;
815                 }
816
817                 sg += ret;
818                 sgl_count -= ret;
819         }
820         return 0;
821 }
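/*
 * Editor's note: the two helpers above pin the guest data buffers with
 * get_user_pages_fast() and build one scatterlist entry per page; the
 * pinned pages are released again with put_page() in vhost_scsi_free_cmd()
 * once the command completes (or immediately on a mapping failure).
 */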
822
823 static void tcm_vhost_submission_work(struct work_struct *work)
824 {
825         struct tcm_vhost_cmd *tv_cmd =
826                 container_of(work, struct tcm_vhost_cmd, work);
827         struct tcm_vhost_nexus *tv_nexus;
828         struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
829         struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
830         int rc, sg_no_bidi = 0;
831
832         if (tv_cmd->tvc_sgl_count) {
833                 sg_ptr = tv_cmd->tvc_sgl;
834 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
835 #if 0
836                 if (se_cmd->se_cmd_flags & SCF_BIDI) {
837                         sg_bidi_ptr = NULL;
838                         sg_no_bidi = 0;
839                 }
840 #endif
841         } else {
842                 sg_ptr = NULL;
843         }
844         tv_nexus = tv_cmd->tvc_nexus;
845
846         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
847                         tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
848                         tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
849                         tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
850                         0, sg_ptr, tv_cmd->tvc_sgl_count,
851                         sg_bidi_ptr, sg_no_bidi);
852         if (rc < 0) {
853                 transport_send_check_condition_and_sense(se_cmd,
854                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
855                 transport_generic_free_cmd(se_cmd, 0);
856         }
857 }
858
859 static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
860         struct vhost_virtqueue *vq, int head, unsigned out)
861 {
862         struct virtio_scsi_cmd_resp __user *resp;
863         struct virtio_scsi_cmd_resp rsp;
864         int ret;
865
866         memset(&rsp, 0, sizeof(rsp));
867         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
868         resp = vq->iov[out].iov_base;
869         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
870         if (!ret)
871                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
872         else
873                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
874 }
875
876 static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
877         struct vhost_virtqueue *vq)
878 {
879         struct tcm_vhost_tpg **vs_tpg;
880         struct virtio_scsi_cmd_req v_req;
881         struct tcm_vhost_tpg *tv_tpg;
882         struct tcm_vhost_cmd *tv_cmd;
883         u32 exp_data_len, data_first, data_num, data_direction;
884         unsigned out, in, i;
885         int head, ret;
886         u8 target;
887
888         /*
889          * We can handle the vq only after the endpoint is setup by calling the
890          * VHOST_SCSI_SET_ENDPOINT ioctl.
891          *
892          * TODO: Check that we are running from vhost_worker which acts
893          * as read-side critical section for vhost kind of RCU.
894          * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
895          */
896         vs_tpg = rcu_dereference_check(vq->private_data, 1);
897         if (!vs_tpg)
898                 return;
899
900         mutex_lock(&vq->mutex);
901         vhost_disable_notify(&vs->dev, vq);
902
903         for (;;) {
904                 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
905                                         ARRAY_SIZE(vq->iov), &out, &in,
906                                         NULL, NULL);
907                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
908                                         head, out, in);
909                 /* On error, stop handling until the next kick. */
910                 if (unlikely(head < 0))
911                         break;
912                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
913                 if (head == vq->num) {
914                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
915                                 vhost_disable_notify(&vs->dev, vq);
916                                 continue;
917                         }
918                         break;
919                 }
920
921 /* FIXME: BIDI operation */
922                 if (out == 1 && in == 1) {
923                         data_direction = DMA_NONE;
924                         data_first = 0;
925                         data_num = 0;
926                 } else if (out == 1 && in > 1) {
927                         data_direction = DMA_FROM_DEVICE;
928                         data_first = out + 1;
929                         data_num = in - 1;
930                 } else if (out > 1 && in == 1) {
931                         data_direction = DMA_TO_DEVICE;
932                         data_first = 1;
933                         data_num = out - 1;
934                 } else {
935                         vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
936                                         out, in);
937                         break;
938                 }
939
940                 /*
941                  * Check for a sane resp buffer so we can report errors to
942                  * the guest.
943                  */
944                 if (unlikely(vq->iov[out].iov_len !=
945                                         sizeof(struct virtio_scsi_cmd_resp))) {
946                         vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
947                                 " bytes\n", vq->iov[out].iov_len);
948                         break;
949                 }
950
951                 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
952                         vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
953                                 " bytes\n", vq->iov[0].iov_len);
954                         break;
955                 }
956                 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
957                         " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
958                 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
959                                 sizeof(v_req));
960                 if (unlikely(ret)) {
961                         vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
962                         break;
963                 }
964
965                 /* Extract the tpgt */
966                 target = v_req.lun[1];
967                 tv_tpg = ACCESS_ONCE(vs_tpg[target]);
968
969                 /* Target does not exist, fail the request */
970                 if (unlikely(!tv_tpg)) {
971                         vhost_scsi_send_bad_target(vs, vq, head, out);
972                         continue;
973                 }
974
975                 exp_data_len = 0;
976                 for (i = 0; i < data_num; i++)
977                         exp_data_len += vq->iov[data_first + i].iov_len;
978
979                 tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
980                                         exp_data_len, data_direction);
981                 if (IS_ERR(tv_cmd)) {
982                         vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
983                                         PTR_ERR(tv_cmd));
984                         goto err_cmd;
985                 }
986                 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
987                         ": %d\n", tv_cmd, exp_data_len, data_direction);
988
989                 tv_cmd->tvc_vhost = vs;
990                 tv_cmd->tvc_vq = vq;
991                 tv_cmd->tvc_resp = vq->iov[out].iov_base;
992
993                 /*
994                  * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
995                  * that will be used by tcm_vhost_new_cmd_map() and down into
996                  * target_setup_cmd_from_cdb()
997                  */
998                 memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
999                 /*
1000                  * Check that the received CDB size does not exceed our
1001                  * hardcoded max for tcm_vhost
1002                  */
1003                 /* TODO what if cdb was too small for varlen cdb header? */
1004                 if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
1005                                         TCM_VHOST_MAX_CDB_SIZE)) {
1006                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1007                                 " exceeds TCM_VHOST_MAX_CDB_SIZE: %d\n",
1008                                 scsi_command_size(tv_cmd->tvc_cdb),
1009                                 TCM_VHOST_MAX_CDB_SIZE);
1010                         goto err_free;
1011                 }
1012                 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1013
1014                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1015                         tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
1016
1017                 if (data_direction != DMA_NONE) {
1018                         ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
1019                                         &vq->iov[data_first], data_num,
1020                                         data_direction == DMA_FROM_DEVICE);
1021                         if (unlikely(ret)) {
1022                                 vq_err(vq, "Failed to map iov to sgl\n");
1023                                 goto err_free;
1024                         }
1025                 }
1026
1027                 /*
1028                  * Save the descriptor from vhost_get_vq_desc() to be used to
1029                  * complete the virtio-scsi request in TCM callback context via
1030                  * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
1031                  */
1032                 tv_cmd->tvc_vq_desc = head;
1033                 /*
1034                  * Dispatch tv_cmd descriptor for cmwq execution in process
1035                  * context provided by tcm_vhost_workqueue.  This also ensures
1036                  * tv_cmd is executed on the same kworker CPU as this vhost
1037                  * thread, to gain positive L2 cache locality effects.
1038                  */
1039                 INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
1040                 queue_work(tcm_vhost_workqueue, &tv_cmd->work);
1041         }
1042
1043         mutex_unlock(&vq->mutex);
1044         return;
1045
1046 err_free:
1047         vhost_scsi_free_cmd(tv_cmd);
1048 err_cmd:
1049         vhost_scsi_send_bad_target(vs, vq, head, out);
1050         mutex_unlock(&vq->mutex);
1051 }
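/*
 * Editor's note on the LUN bytes handled above: the 8-byte virtio-scsi
 * lun[] field is decoded as lun[1] = target id and
 * ((lun[2] << 8) | lun[3]) & 0x3FFF = the LUN, i.e. SAM flat-space
 * addressing; tcm_vhost_send_evt() below re-encodes LUNs the same way,
 * setting the 0x40 flag in lun[2] for LUNs >= 256.
 */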
1052
1053 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1054 {
1055         pr_debug("%s: The handling func for control queue.\n", __func__);
1056 }
1057
1058 static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
1059         struct se_lun *lun, u32 event, u32 reason)
1060 {
1061         struct tcm_vhost_evt *evt;
1062
1063         evt = tcm_vhost_allocate_evt(vs, event, reason);
1064         if (!evt)
1065                 return;
1066
1067         if (tpg && lun) {
1068                 /* TODO: share lun setup code with virtio-scsi.ko */
1069                 /*
1070                  * Note: evt->event is zeroed when we allocate it and
1071                  * lun[4-7] need to be zero according to virtio-scsi spec.
1072                  */
1073                 evt->event.lun[0] = 0x01;
1074                 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1075                 if (lun->unpacked_lun >= 256)
1076                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1077                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1078         }
1079
1080         llist_add(&evt->list, &vs->vs_event_list);
1081         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1082 }
1083
1084 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1085 {
1086         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1087                                                 poll.work);
1088         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1089
1090         mutex_lock(&vq->mutex);
1091         if (!vq->private_data)
1092                 goto out;
1093
1094         if (vs->vs_events_missed)
1095                 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1096 out:
1097         mutex_unlock(&vq->mutex);
1098 }
1099
1100 static void vhost_scsi_handle_kick(struct vhost_work *work)
1101 {
1102         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1103                                                 poll.work);
1104         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1105
1106         vhost_scsi_handle_vq(vs, vq);
1107 }
1108
1109 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1110 {
1111         vhost_poll_flush(&vs->vqs[index].vq.poll);
1112 }
1113
1114 /* Callers must hold dev mutex */
1115 static void vhost_scsi_flush(struct vhost_scsi *vs)
1116 {
1117         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1118         int i;
1119
1120         /* Init new inflight and remember the old inflight */
1121         tcm_vhost_init_inflight(vs, old_inflight);
1122
1123         /*
1124          * The inflight->kref was initialized to 1. We decrement it here to
1125          * indicate the start of the flush operation so that it will reach 0
1126          * when all the reqs are finished.
1127          */
1128         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1129                 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1130
1131         /* Flush both the vhost poll and vhost work */
1132         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1133                 vhost_scsi_flush_vq(vs, i);
1134         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1135         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1136
1137         /* Wait for all reqs issued before the flush to be finished */
1138         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1139                 wait_for_completion(&old_inflight[i]->comp);
1140 }
1141
1142 /*
1143  * Called from vhost_scsi_ioctl() context to walk the list of available
1144  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
1145  *
1146  *  The lock nesting rule is:
1147  *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1148  */
1149 static int vhost_scsi_set_endpoint(
1150         struct vhost_scsi *vs,
1151         struct vhost_scsi_target *t)
1152 {
1153         struct tcm_vhost_tport *tv_tport;
1154         struct tcm_vhost_tpg *tv_tpg;
1155         struct tcm_vhost_tpg **vs_tpg;
1156         struct vhost_virtqueue *vq;
1157         int index, ret, i, len;
1158         bool match = false;
1159
1160         mutex_lock(&tcm_vhost_mutex);
1161         mutex_lock(&vs->dev.mutex);
1162
1163         /* Verify that ring has been set up correctly. */
1164         for (index = 0; index < vs->dev.nvqs; ++index) {
1166                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1167                         ret = -EFAULT;
1168                         goto out;
1169                 }
1170         }
1171
1172         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1173         vs_tpg = kzalloc(len, GFP_KERNEL);
1174         if (!vs_tpg) {
1175                 ret = -ENOMEM;
1176                 goto out;
1177         }
1178         if (vs->vs_tpg)
1179                 memcpy(vs_tpg, vs->vs_tpg, len);
1180
1181         list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
1182                 mutex_lock(&tv_tpg->tv_tpg_mutex);
1183                 if (!tv_tpg->tpg_nexus) {
1184                         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1185                         continue;
1186                 }
1187                 if (tv_tpg->tv_tpg_vhost_count != 0) {
1188                         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1189                         continue;
1190                 }
1191                 tv_tport = tv_tpg->tport;
1192
1193                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1194                         if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
1195                                 kfree(vs_tpg);
1196                                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1197                                 ret = -EEXIST;
1198                                 goto out;
1199                         }
1200                         tv_tpg->tv_tpg_vhost_count++;
1201                         tv_tpg->vhost_scsi = vs;
1202                         vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
1203                         smp_mb__after_atomic_inc();
1204                         match = true;
1205                 }
1206                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1207         }
1208
1209         if (match) {
1210                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1211                        sizeof(vs->vs_vhost_wwpn));
1212                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1213                         vq = &vs->vqs[i].vq;
1214                         /* Flushing the vhost_work acts as synchronize_rcu */
1215                         mutex_lock(&vq->mutex);
1216                         rcu_assign_pointer(vq->private_data, vs_tpg);
1217                         vhost_init_used(vq);
1218                         mutex_unlock(&vq->mutex);
1219                 }
1220                 ret = 0;
1221         } else {
1222                 ret = -EEXIST;
1223         }
1224
1225         /*
1226          * Act as synchronize_rcu to make sure access to
1227          * old vs->vs_tpg is finished.
1228          */
1229         vhost_scsi_flush(vs);
1230         kfree(vs->vs_tpg);
1231         vs->vs_tpg = vs_tpg;
1232
1233 out:
1234         mutex_unlock(&vs->dev.mutex);
1235         mutex_unlock(&tcm_vhost_mutex);
1236         return ret;
1237 }
1238
1239 static int vhost_scsi_clear_endpoint(
1240         struct vhost_scsi *vs,
1241         struct vhost_scsi_target *t)
1242 {
1243         struct tcm_vhost_tport *tv_tport;
1244         struct tcm_vhost_tpg *tv_tpg;
1245         struct vhost_virtqueue *vq;
1246         bool match = false;
1247         int index, ret, i;
1248         u8 target;
1249
1250         mutex_lock(&tcm_vhost_mutex);
1251         mutex_lock(&vs->dev.mutex);
1252         /* Verify that ring has been set up correctly. */
1253         for (index = 0; index < vs->dev.nvqs; ++index) {
1254                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1255                         ret = -EFAULT;
1256                         goto err_dev;
1257                 }
1258         }
1259
1260         if (!vs->vs_tpg) {
1261                 ret = 0;
1262                 goto err_dev;
1263         }
1264
1265         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1266                 target = i;
1267                 tv_tpg = vs->vs_tpg[target];
1268                 if (!tv_tpg)
1269                         continue;
1270
1271                 mutex_lock(&tv_tpg->tv_tpg_mutex);
1272                 tv_tport = tv_tpg->tport;
1273                 if (!tv_tport) {
1274                         ret = -ENODEV;
1275                         goto err_tpg;
1276                 }
1277
1278                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1279                         pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
1280                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1281                                 tv_tport->tport_name, tv_tpg->tport_tpgt,
1282                                 t->vhost_wwpn, t->vhost_tpgt);
1283                         ret = -EINVAL;
1284                         goto err_tpg;
1285                 }
1286                 tv_tpg->tv_tpg_vhost_count--;
1287                 tv_tpg->vhost_scsi = NULL;
1288                 vs->vs_tpg[target] = NULL;
1289                 match = true;
1290                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1291         }
1292         if (match) {
1293                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1294                         vq = &vs->vqs[i].vq;
1295                         /* Flushing the vhost_work acts as synchronize_rcu */
1296                         mutex_lock(&vq->mutex);
1297                         rcu_assign_pointer(vq->private_data, NULL);
1298                         mutex_unlock(&vq->mutex);
1299                 }
1300         }
1301         /*
1302          * Act as synchronize_rcu to make sure access to
1303          * old vs->vs_tpg is finished.
1304          */
1305         vhost_scsi_flush(vs);
1306         kfree(vs->vs_tpg);
1307         vs->vs_tpg = NULL;
1308         WARN_ON(vs->vs_events_nr);
1309         mutex_unlock(&vs->dev.mutex);
1310         mutex_unlock(&tcm_vhost_mutex);
1311         return 0;
1312
1313 err_tpg:
1314         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1315 err_dev:
1316         mutex_unlock(&vs->dev.mutex);
1317         mutex_unlock(&tcm_vhost_mutex);
1318         return ret;
1319 }
1320
1321 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1322 {
1323         if (features & ~VHOST_SCSI_FEATURES)
1324                 return -EOPNOTSUPP;
1325
1326         mutex_lock(&vs->dev.mutex);
1327         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1328             !vhost_log_access_ok(&vs->dev)) {
1329                 mutex_unlock(&vs->dev.mutex);
1330                 return -EFAULT;
1331         }
1332         vs->dev.acked_features = features;
1333         smp_wmb();
1334         vhost_scsi_flush(vs);
1335         mutex_unlock(&vs->dev.mutex);
1336         return 0;
1337 }
1338
1339 static int vhost_scsi_open(struct inode *inode, struct file *f)
1340 {
1341         struct vhost_scsi *s;
1342         struct vhost_virtqueue **vqs;
1343         int r, i;
1344
1345         s = kzalloc(sizeof(*s), GFP_KERNEL);
1346         if (!s)
1347                 return -ENOMEM;
1348
1349         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1350         if (!vqs) {
1351                 kfree(s);
1352                 return -ENOMEM;
1353         }
1354
1355         vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
1356         vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
1357
1358         s->vs_events_nr = 0;
1359         s->vs_events_missed = false;
1360
1361         vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
1362         vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
1363         s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1364         s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1365         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1366                 vqs[i] = &s->vqs[i].vq;
1367                 s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1368         }
1369         r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
1370
1371         tcm_vhost_init_inflight(s, NULL);
1372
1373         if (r < 0) {
1374                 kfree(vqs);
1375                 kfree(s);
1376                 return r;
1377         }
1378
1379         f->private_data = s;
1380         return 0;
1381 }
1382
1383 static int vhost_scsi_release(struct inode *inode, struct file *f)
1384 {
1385         struct vhost_scsi *s = f->private_data;
1386         struct vhost_scsi_target t;
1387
1388         mutex_lock(&s->dev.mutex);
1389         memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1390         mutex_unlock(&s->dev.mutex);
1391         vhost_scsi_clear_endpoint(s, &t);
1392         vhost_dev_stop(&s->dev);
1393         vhost_dev_cleanup(&s->dev, false);
1394         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1395         vhost_scsi_flush(s);
1396         kfree(s->dev.vqs);
1397         kfree(s);
1398         return 0;
1399 }
1400
1401 static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
1402                                 unsigned long arg)
1403 {
1404         struct vhost_scsi *vs = f->private_data;
1405         struct vhost_scsi_target backend;
1406         void __user *argp = (void __user *)arg;
1407         u64 __user *featurep = argp;
1408         u32 __user *eventsp = argp;
1409         u32 events_missed;
1410         u64 features;
1411         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1412         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1413
1414         switch (ioctl) {
1415         case VHOST_SCSI_SET_ENDPOINT:
1416                 if (copy_from_user(&backend, argp, sizeof backend))
1417                         return -EFAULT;
1418                 if (backend.reserved != 0)
1419                         return -EOPNOTSUPP;
1420
1421                 return vhost_scsi_set_endpoint(vs, &backend);
1422         case VHOST_SCSI_CLEAR_ENDPOINT:
1423                 if (copy_from_user(&backend, argp, sizeof backend))
1424                         return -EFAULT;
1425                 if (backend.reserved != 0)
1426                         return -EOPNOTSUPP;
1427
1428                 return vhost_scsi_clear_endpoint(vs, &backend);
1429         case VHOST_SCSI_GET_ABI_VERSION:
1430                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1431                         return -EFAULT;
1432                 return 0;
1433         case VHOST_SCSI_SET_EVENTS_MISSED:
1434                 if (get_user(events_missed, eventsp))
1435                         return -EFAULT;
1436                 mutex_lock(&vq->mutex);
1437                 vs->vs_events_missed = events_missed;
1438                 mutex_unlock(&vq->mutex);
1439                 return 0;
1440         case VHOST_SCSI_GET_EVENTS_MISSED:
1441                 mutex_lock(&vq->mutex);
1442                 events_missed = vs->vs_events_missed;
1443                 mutex_unlock(&vq->mutex);
1444                 if (put_user(events_missed, eventsp))
1445                         return -EFAULT;
1446                 return 0;
1447         case VHOST_GET_FEATURES:
1448                 features = VHOST_SCSI_FEATURES;
1449                 if (copy_to_user(featurep, &features, sizeof features))
1450                         return -EFAULT;
1451                 return 0;
1452         case VHOST_SET_FEATURES:
1453                 if (copy_from_user(&features, featurep, sizeof features))
1454                         return -EFAULT;
1455                 return vhost_scsi_set_features(vs, features);
1456         default:
1457                 mutex_lock(&vs->dev.mutex);
1458                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1459                 /* TODO: flush backend after dev ioctl. */
1460                 if (r == -ENOIOCTLCMD)
1461                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1462                 mutex_unlock(&vs->dev.mutex);
1463                 return r;
1464         }
1465 }
1466
1467 #ifdef CONFIG_COMPAT
1468 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1469                                 unsigned long arg)
1470 {
1471         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1472 }
1473 #endif
1474
1475 static const struct file_operations vhost_scsi_fops = {
1476         .owner          = THIS_MODULE,
1477         .release        = vhost_scsi_release,
1478         .unlocked_ioctl = vhost_scsi_ioctl,
1479 #ifdef CONFIG_COMPAT
1480         .compat_ioctl   = vhost_scsi_compat_ioctl,
1481 #endif
1482         .open           = vhost_scsi_open,
1483         .llseek         = noop_llseek,
1484 };
1485
1486 static struct miscdevice vhost_scsi_misc = {
1487         MISC_DYNAMIC_MINOR,
1488         "vhost-scsi",
1489         &vhost_scsi_fops,
1490 };
1491
1492 static int __init vhost_scsi_register(void)
1493 {
1494         return misc_register(&vhost_scsi_misc);
1495 }
1496
1497 static int vhost_scsi_deregister(void)
1498 {
1499         return misc_deregister(&vhost_scsi_misc);
1500 }
1501
1502 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1503 {
1504         switch (tport->tport_proto_id) {
1505         case SCSI_PROTOCOL_SAS:
1506                 return "SAS";
1507         case SCSI_PROTOCOL_FCP:
1508                 return "FCP";
1509         case SCSI_PROTOCOL_ISCSI:
1510                 return "iSCSI";
1511         default:
1512                 break;
1513         }
1514
1515         return "Unknown";
1516 }
1517
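/*
 * Hotplug/hotunplug helper: a VIRTIO_SCSI_T_TRANSPORT_RESET event with
 * reason RESCAN (plug) or REMOVED (unplug) is queued for the guest, but
 * only when the guest has negotiated VIRTIO_SCSI_F_HOTPLUG; otherwise the
 * LUN change is silently dropped and the guest must rescan on its own.
 */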
1518 static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1519         struct se_lun *lun, bool plug)
1520 {
1522         struct vhost_scsi *vs = tpg->vhost_scsi;
1523         struct vhost_virtqueue *vq;
1524         u32 reason;
1525
1526         if (!vs)
1527                 return;
1528
1529         mutex_lock(&vs->dev.mutex);
1530         if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
1531                 mutex_unlock(&vs->dev.mutex);
1532                 return;
1533         }
1534
1535         if (plug)
1536                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1537         else
1538                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1539
1540         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1541         mutex_lock(&vq->mutex);
1542         tcm_vhost_send_evt(vs, tpg, lun,
1543                         VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1544         mutex_unlock(&vq->mutex);
1545         mutex_unlock(&vs->dev.mutex);
1546 }
1547
1548 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1549 {
1550         tcm_vhost_do_plug(tpg, lun, true);
1551 }
1552
1553 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1554 {
1555         tcm_vhost_do_plug(tpg, lun, false);
1556 }
1557
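/*
 * fabric_post_link callback: invoked by target_core when a LUN is linked
 * into this TPG through configfs.  The port count is bumped under the TPG
 * mutex and a hotplug event is pushed to any vhost instance that has this
 * TPG set as its endpoint.
 */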
1558 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1559         struct se_lun *lun)
1560 {
1561         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1562                                 struct tcm_vhost_tpg, se_tpg);
1563
1564         mutex_lock(&tcm_vhost_mutex);
1565
1566         mutex_lock(&tv_tpg->tv_tpg_mutex);
1567         tv_tpg->tv_tpg_port_count++;
1568         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1569
1570         tcm_vhost_hotplug(tv_tpg, lun);
1571
1572         mutex_unlock(&tcm_vhost_mutex);
1573
1574         return 0;
1575 }
1576
1577 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1578         struct se_lun *lun)
1579 {
1580         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1581                                 struct tcm_vhost_tpg, se_tpg);
1582
1583         mutex_lock(&tcm_vhost_mutex);
1584
1585         mutex_lock(&tv_tpg->tv_tpg_mutex);
1586         tv_tpg->tv_tpg_port_count--;
1587         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1588
1589         tcm_vhost_hotunplug(tv_tpg, lun);
1590
1591         mutex_unlock(&tcm_vhost_mutex);
1592 }
1593
1594 static struct se_node_acl *tcm_vhost_make_nodeacl(
1595         struct se_portal_group *se_tpg,
1596         struct config_group *group,
1597         const char *name)
1598 {
1599         struct se_node_acl *se_nacl, *se_nacl_new;
1600         struct tcm_vhost_nacl *nacl;
1601         u64 wwpn = 0;
1602         u32 nexus_depth;
1603
1604         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1605                 return ERR_PTR(-EINVAL); */
1606         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1607         if (!se_nacl_new)
1608                 return ERR_PTR(-ENOMEM);
1609
1610         nexus_depth = 1;
1611         /*
1612          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1613          * when converting a NodeACL from demo mode -> explicit
1614          */
1615         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1616                                 name, nexus_depth);
1617         if (IS_ERR(se_nacl)) {
1618                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1619                 return se_nacl;
1620         }
1621         /*
1622          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1623          */
1624         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1625         nacl->iport_wwpn = wwpn;
1626
1627         return se_nacl;
1628 }
1629
1630 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1631 {
1632         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1633                                 struct tcm_vhost_nacl, se_node_acl);
1634         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1635         kfree(nacl);
1636 }
1637
1638 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
1639         const char *name)
1640 {
1641         struct se_portal_group *se_tpg;
1642         struct tcm_vhost_nexus *tv_nexus;
1643
1644         mutex_lock(&tv_tpg->tv_tpg_mutex);
1645         if (tv_tpg->tpg_nexus) {
1646                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1647                 pr_debug("tv_tpg->tpg_nexus already exists\n");
1648                 return -EEXIST;
1649         }
1650         se_tpg = &tv_tpg->se_tpg;
1651
1652         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1653         if (!tv_nexus) {
1654                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1655                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1656                 return -ENOMEM;
1657         }
1658         /*
1659          *  Initialize the struct se_session pointer
1660          */
1661         tv_nexus->tvn_se_sess = transport_init_session();
1662         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1663                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1664                 kfree(tv_nexus);
1665                 return -ENOMEM;
1666         }
1667         /*
1668          * Since we are running in 'demo mode' this call will generate a
1669          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1670          * the SCSI Initiator port name of the passed configfs group 'name'.
1671          */
1672         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1673                                 se_tpg, (unsigned char *)name);
1674         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1675                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1676                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1677                                 " for %s\n", name);
1678                 transport_free_session(tv_nexus->tvn_se_sess);
1679                 kfree(tv_nexus);
1680                 return -ENOMEM;
1681         }
1682         /*
1683          * Now register the TCM vhost virtual I_T Nexus as active with the
1684          * call to __transport_register_session()
1685          */
1686         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1687                         tv_nexus->tvn_se_sess, tv_nexus);
1688         tv_tpg->tpg_nexus = tv_nexus;
1689
1690         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1691         return 0;
1692 }
1693
1694 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1695 {
1696         struct se_session *se_sess;
1697         struct tcm_vhost_nexus *tv_nexus;
1698
1699         mutex_lock(&tpg->tv_tpg_mutex);
1700         tv_nexus = tpg->tpg_nexus;
1701         if (!tv_nexus) {
1702                 mutex_unlock(&tpg->tv_tpg_mutex);
1703                 return -ENODEV;
1704         }
1705
1706         se_sess = tv_nexus->tvn_se_sess;
1707         if (!se_sess) {
1708                 mutex_unlock(&tpg->tv_tpg_mutex);
1709                 return -ENODEV;
1710         }
1711
1712         if (tpg->tv_tpg_port_count != 0) {
1713                 mutex_unlock(&tpg->tv_tpg_mutex);
1714                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1715                         " active TPG port count: %d\n",
1716                         tpg->tv_tpg_port_count);
1717                 return -EBUSY;
1718         }
1719
1720         if (tpg->tv_tpg_vhost_count != 0) {
1721                 mutex_unlock(&tpg->tv_tpg_mutex);
1722                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1723                         " active TPG vhost count: %d\n",
1724                         tpg->tv_tpg_vhost_count);
1725                 return -EBUSY;
1726         }
1727
1728         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1729                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1730                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1731         /*
1732          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1733          */
1734         transport_deregister_session(tv_nexus->tvn_se_sess);
1735         tpg->tpg_nexus = NULL;
1736         mutex_unlock(&tpg->tv_tpg_mutex);
1737
1738         kfree(tv_nexus);
1739         return 0;
1740 }
1741
1742 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1743         char *page)
1744 {
1745         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1746                                 struct tcm_vhost_tpg, se_tpg);
1747         struct tcm_vhost_nexus *tv_nexus;
1748         ssize_t ret;
1749
1750         mutex_lock(&tv_tpg->tv_tpg_mutex);
1751         tv_nexus = tv_tpg->tpg_nexus;
1752         if (!tv_nexus) {
1753                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1754                 return -ENODEV;
1755         }
1756         ret = snprintf(page, PAGE_SIZE, "%s\n",
1757                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1758         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1759
1760         return ret;
1761 }
1762
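/*
 * configfs "nexus" attribute store.  A typical invocation from userspace,
 * shown as an illustration only (the mount point and the WWPN value are
 * assumptions):
 *
 *	echo -n naa.600140554cf3a18e > \
 *	    /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *
 * The initiator-port prefix must match the protocol of the target port
 * (naa. for SAS, fc. for FCP, iqn. for iSCSI), and writing the literal
 * string "NULL" drops the active I_T nexus instead of creating one.
 */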
1763 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1764         const char *page,
1765         size_t count)
1766 {
1767         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1768                                 struct tcm_vhost_tpg, se_tpg);
1769         struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
1770         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1771         int ret;
1772         /*
1773          * Shutdown the active I_T nexus if 'NULL' is passed..
1774          * Shut down the active I_T nexus if 'NULL' is passed.
1775         if (!strncmp(page, "NULL", 4)) {
1776                 ret = tcm_vhost_drop_nexus(tv_tpg);
1777                 return (!ret) ? count : ret;
1778         }
1779         /*
1780          * Otherwise make sure the passed virtual Initiator port WWN matches
1781          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1782          * tcm_vhost_make_nexus().
1783          */
1784         if (strlen(page) >= TCM_VHOST_NAMELEN) {
1785                 pr_err("Emulated NAA SAS Address: %s, exceeds"
1786                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
1787                 return -EINVAL;
1788         }
1789         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1790
1791         ptr = strstr(i_port, "naa.");
1792         if (ptr) {
1793                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1794                         pr_err("Passed SAS Initiator Port %s does not"
1795                                 " match target port protoid: %s\n", i_port,
1796                                 tcm_vhost_dump_proto_id(tport_wwn));
1797                         return -EINVAL;
1798                 }
1799                 port_ptr = &i_port[0];
1800                 goto check_newline;
1801         }
1802         ptr = strstr(i_port, "fc.");
1803         if (ptr) {
1804                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1805                         pr_err("Passed FCP Initiator Port %s does not"
1806                                 " match target port protoid: %s\n", i_port,
1807                                 tcm_vhost_dump_proto_id(tport_wwn));
1808                         return -EINVAL;
1809                 }
1810                 port_ptr = &i_port[3]; /* Skip over "fc." */
1811                 goto check_newline;
1812         }
1813         ptr = strstr(i_port, "iqn.");
1814         if (ptr) {
1815                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1816                         pr_err("Passed iSCSI Initiator Port %s does not"
1817                                 " match target port protoid: %s\n", i_port,
1818                                 tcm_vhost_dump_proto_id(tport_wwn));
1819                         return -EINVAL;
1820                 }
1821                 port_ptr = &i_port[0];
1822                 goto check_newline;
1823         }
1824         pr_err("Unable to locate prefix for emulated Initiator Port:"
1825                         " %s\n", i_port);
1826         return -EINVAL;
1827         /*
1828          * Clear any trailing newline for the NAA WWN
1829          */
1830 check_newline:
1831         if (i_port[strlen(i_port)-1] == '\n')
1832                 i_port[strlen(i_port)-1] = '\0';
1833
1834         ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
1835         if (ret < 0)
1836                 return ret;
1837
1838         return count;
1839 }
1840
1841 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1842
1843 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1844         &tcm_vhost_tpg_nexus.attr,
1845         NULL,
1846 };
1847
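/*
 * configfs TPG creation.  Directory names must be of the form "tpgt_<n>",
 * for example (path illustrative):
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *
 * The new TPG is registered with target_core and added to the global
 * tcm_vhost_list so vhost_scsi_set_endpoint() can find it by WWPN.
 */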
1848 static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
1849         struct config_group *group,
1850         const char *name)
1851 {
1852         struct tcm_vhost_tport *tport = container_of(wwn,
1853                         struct tcm_vhost_tport, tport_wwn);
1854
1855         struct tcm_vhost_tpg *tpg;
1856         unsigned long tpgt;
1857         int ret;
1858
1859         if (strstr(name, "tpgt_") != name)
1860                 return ERR_PTR(-EINVAL);
1861         if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1862                 return ERR_PTR(-EINVAL);
1863
1864         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1865         if (!tpg) {
1866                 pr_err("Unable to allocate struct tcm_vhost_tpg");
1867                 return ERR_PTR(-ENOMEM);
1868         }
1869         mutex_init(&tpg->tv_tpg_mutex);
1870         INIT_LIST_HEAD(&tpg->tv_tpg_list);
1871         tpg->tport = tport;
1872         tpg->tport_tpgt = tpgt;
1873
1874         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1875                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1876         if (ret < 0) {
1877                 kfree(tpg);
1878                 return NULL;
1879         }
1880         mutex_lock(&tcm_vhost_mutex);
1881         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1882         mutex_unlock(&tcm_vhost_mutex);
1883
1884         return &tpg->se_tpg;
1885 }
1886
1887 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1888 {
1889         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1890                                 struct tcm_vhost_tpg, se_tpg);
1891
1892         mutex_lock(&tcm_vhost_mutex);
1893         list_del(&tpg->tv_tpg_list);
1894         mutex_unlock(&tcm_vhost_mutex);
1895         /*
1896          * Release the virtual I_T Nexus for this vhost TPG
1897          */
1898         tcm_vhost_drop_nexus(tpg);
1899         /*
1900          * Deregister the se_tpg from TCM.
1901          */
1902         core_tpg_deregister(se_tpg);
1903         kfree(tpg);
1904 }
1905
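/*
 * configfs WWN (target port) creation.  The directory name selects the
 * emulated protocol by prefix: "naa." -> SAS, "fc." -> FCP, "iqn." ->
 * iSCSI; anything else is rejected.  Illustrative example:
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e
 */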
1906 static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1907         struct config_group *group,
1908         const char *name)
1909 {
1910         struct tcm_vhost_tport *tport;
1911         char *ptr;
1912         u64 wwpn = 0;
1913         int off = 0;
1914
1915         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1916                 return ERR_PTR(-EINVAL); */
1917
1918         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1919         if (!tport) {
1920                 pr_err("Unable to allocate struct tcm_vhost_tport");
1921                 return ERR_PTR(-ENOMEM);
1922         }
1923         tport->tport_wwpn = wwpn;
1924         /*
1925          * Determine the emulated Protocol Identifier and Target Port Name
1926          * based on the incoming configfs directory name.
1927          */
1928         ptr = strstr(name, "naa.");
1929         if (ptr) {
1930                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1931                 goto check_len;
1932         }
1933         ptr = strstr(name, "fc.");
1934         if (ptr) {
1935                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1936                 off = 3; /* Skip over "fc." */
1937                 goto check_len;
1938         }
1939         ptr = strstr(name, "iqn.");
1940         if (ptr) {
1941                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1942                 goto check_len;
1943         }
1944
1945         pr_err("Unable to locate prefix for emulated Target Port:"
1946                         " %s\n", name);
1947         kfree(tport);
1948         return ERR_PTR(-EINVAL);
1949
1950 check_len:
1951         if (strlen(name) >= TCM_VHOST_NAMELEN) {
1952                 pr_err("Emulated %s Address: %s, exceeds"
1953                         " max: %d\n", tcm_vhost_dump_proto_id(tport), name,
1954                         TCM_VHOST_NAMELEN);
1955                 kfree(tport);
1956                 return ERR_PTR(-EINVAL);
1957         }
1958         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1959
1960         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1961                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1962
1963         return &tport->tport_wwn;
1964 }
1965
1966 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1967 {
1968         struct tcm_vhost_tport *tport = container_of(wwn,
1969                                 struct tcm_vhost_tport, tport_wwn);
1970
1971         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1972                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
1973                 tport->tport_name);
1974
1975         kfree(tport);
1976 }
1977
1978 static ssize_t tcm_vhost_wwn_show_attr_version(
1979         struct target_fabric_configfs *tf,
1980         char *page)
1981 {
1982         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
1983                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1984                 utsname()->machine);
1985 }
1986
1987 TF_WWN_ATTR_RO(tcm_vhost, version);
1988
1989 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
1990         &tcm_vhost_wwn_version.attr,
1991         NULL,
1992 };
1993
1994 static struct target_core_fabric_ops tcm_vhost_ops = {
1995         .get_fabric_name                = tcm_vhost_get_fabric_name,
1996         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
1997         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
1998         .tpg_get_tag                    = tcm_vhost_get_tag,
1999         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2000         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2001         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2002         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2003         .tpg_check_demo_mode            = tcm_vhost_check_true,
2004         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2005         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2006         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2007         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2008         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2009         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2010         .release_cmd                    = tcm_vhost_release_cmd,
2011         .shutdown_session               = tcm_vhost_shutdown_session,
2012         .close_session                  = tcm_vhost_close_session,
2013         .sess_get_index                 = tcm_vhost_sess_get_index,
2014         .sess_get_initiator_sid         = NULL,
2015         .write_pending                  = tcm_vhost_write_pending,
2016         .write_pending_status           = tcm_vhost_write_pending_status,
2017         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2018         .get_task_tag                   = tcm_vhost_get_task_tag,
2019         .get_cmd_state                  = tcm_vhost_get_cmd_state,
2020         .queue_data_in                  = tcm_vhost_queue_data_in,
2021         .queue_status                   = tcm_vhost_queue_status,
2022         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2023         /*
2024          * Setup callers for generic logic in target_core_fabric_configfs.c
2025          */
2026         .fabric_make_wwn                = tcm_vhost_make_tport,
2027         .fabric_drop_wwn                = tcm_vhost_drop_tport,
2028         .fabric_make_tpg                = tcm_vhost_make_tpg,
2029         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2030         .fabric_post_link               = tcm_vhost_port_link,
2031         .fabric_pre_unlink              = tcm_vhost_port_unlink,
2032         .fabric_make_np                 = NULL,
2033         .fabric_drop_np                 = NULL,
2034         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2035         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2036 };
2037
2038 static int tcm_vhost_register_configfs(void)
2039 {
2040         struct target_fabric_configfs *fabric;
2041         int ret;
2042
2043         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2044                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2045                 utsname()->machine);
2046         /*
2047          * Register the top level struct config_item_type with TCM core
2048          */
2049         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2050         if (IS_ERR(fabric)) {
2051                 pr_err("target_fabric_configfs_init() failed\n");
2052                 return PTR_ERR(fabric);
2053         }
2054         /*
2055          * Setup fabric->tf_ops from our local tcm_vhost_ops
2056          */
2057         fabric->tf_ops = tcm_vhost_ops;
2058         /*
2059          * Setup default attribute lists for various fabric->tf_cit_tmpl
2060          */
2061         TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2062         TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2063         TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
2064         TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
2065         TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
2066         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2067         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2068         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2069         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2070         /*
2071          * Register the fabric for use within TCM
2072          */
2073         ret = target_fabric_configfs_register(fabric);
2074         if (ret < 0) {
2075                 pr_err("target_fabric_configfs_register() failed"
2076                                 " for TCM_VHOST\n");
2077                 return ret;
2078         }
2079         /*
2080          * Setup our local pointer to *fabric
2081          */
2082         tcm_vhost_fabric_configfs = fabric;
2083         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2084         return 0;
2085 }
2086
2087 static void tcm_vhost_deregister_configfs(void)
2088 {
2089         if (!tcm_vhost_fabric_configfs)
2090                 return;
2091
2092         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2093         tcm_vhost_fabric_configfs = NULL;
2094         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2095 }
2096
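/*
 * Module init/exit ordering: the private workqueue comes up first, then
 * the misc character device, then the configfs fabric registration; each
 * step is unwound in reverse on failure and again in tcm_vhost_exit().
 */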
2097 static int __init tcm_vhost_init(void)
2098 {
2099         int ret = -ENOMEM;
2100         /*
2101          * Use our own dedicated workqueue for submitting I/O into
2102          * target core to avoid contention within system_wq.
2103          */
2104         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2105         if (!tcm_vhost_workqueue)
2106                 goto out;
2107
2108         ret = vhost_scsi_register();
2109         if (ret < 0)
2110                 goto out_destroy_workqueue;
2111
2112         ret = tcm_vhost_register_configfs();
2113         if (ret < 0)
2114                 goto out_vhost_scsi_deregister;
2115
2116         return 0;
2117
2118 out_vhost_scsi_deregister:
2119         vhost_scsi_deregister();
2120 out_destroy_workqueue:
2121         destroy_workqueue(tcm_vhost_workqueue);
2122 out:
2123         return ret;
2124 }
2125
2126 static void tcm_vhost_exit(void)
2127 {
2128         tcm_vhost_deregister_configfs();
2129         vhost_scsi_deregister();
2130         destroy_workqueue(tcm_vhost_workqueue);
2131 }
2132
2133 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2134 MODULE_ALIAS("tcm_vhost");
2135 MODULE_LICENSE("GPL");
2136 module_init(tcm_vhost_init);
2137 module_exit(tcm_vhost_exit);