HDMI:rk3368/rk3288: delete some unused code.
[firefly-linux-kernel-4.4.55.git] / drivers / vhost / scsi.c
1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2012 RisingTide Systems LLC.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_tcq.h>
42 #include <target/target_core_base.h>
43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_configfs.h>
45 #include <target/target_core_configfs.h>
46 #include <target/configfs_macros.h>
47 #include <linux/vhost.h>
48 #include <linux/virtio_scsi.h>
49 #include <linux/llist.h>
50 #include <linux/bitmap.h>
51
52 #include "vhost.c"
53 #include "vhost.h"
54
55 #define TCM_VHOST_VERSION  "v0.1"
56 #define TCM_VHOST_NAMELEN 256
57 #define TCM_VHOST_MAX_CDB_SIZE 32
58
59 struct vhost_scsi_inflight {
60         /* Wait for the flush operation to finish */
61         struct completion comp;
62         /* Refcount for the inflight reqs */
63         struct kref kref;
64 };
65
66 struct tcm_vhost_cmd {
67         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
68         int tvc_vq_desc;
69         /* virtio-scsi initiator task attribute */
70         int tvc_task_attr;
71         /* virtio-scsi initiator data direction */
72         enum dma_data_direction tvc_data_direction;
73         /* Expected data transfer length from virtio-scsi header */
74         u32 tvc_exp_data_len;
75         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
76         u64 tvc_tag;
77         /* The number of scatterlists associated with this cmd */
78         u32 tvc_sgl_count;
79         /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
80         u32 tvc_lun;
81         /* Pointer to the SGL formatted memory from virtio-scsi */
82         struct scatterlist *tvc_sgl;
83         /* Pointer to response */
84         struct virtio_scsi_cmd_resp __user *tvc_resp;
85         /* Pointer to vhost_scsi for our device */
86         struct vhost_scsi *tvc_vhost;
87         /* Pointer to vhost_virtqueue for the cmd */
88         struct vhost_virtqueue *tvc_vq;
89         /* Pointer to vhost nexus memory */
90         struct tcm_vhost_nexus *tvc_nexus;
91         /* The TCM I/O descriptor that is accessed via container_of() */
92         struct se_cmd tvc_se_cmd;
93         /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
94         struct work_struct work;
95         /* Copy of the incoming SCSI command descriptor block (CDB) */
96         unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
97         /* Sense buffer that will be mapped into outgoing status */
98         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
99         /* Completed commands list, serviced from vhost worker thread */
100         struct llist_node tvc_completion_list;
101         /* Used to track inflight cmd */
102         struct vhost_scsi_inflight *inflight;
103 };
104
105 struct tcm_vhost_nexus {
106         /* Pointer to TCM session for I_T Nexus */
107         struct se_session *tvn_se_sess;
108 };
109
110 struct tcm_vhost_nacl {
111         /* Binary World Wide unique Port Name for Vhost Initiator port */
112         u64 iport_wwpn;
113         /* ASCII formatted WWPN for Sas Initiator port */
114         char iport_name[TCM_VHOST_NAMELEN];
115         /* Returned by tcm_vhost_make_nodeacl() */
116         struct se_node_acl se_node_acl;
117 };
118
119 struct vhost_scsi;
120 struct tcm_vhost_tpg {
121         /* Vhost port target portal group tag for TCM */
122         u16 tport_tpgt;
123         /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
124         int tv_tpg_port_count;
125         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
126         int tv_tpg_vhost_count;
127         /* list for tcm_vhost_list */
128         struct list_head tv_tpg_list;
129         /* Used to protect access for tpg_nexus */
130         struct mutex tv_tpg_mutex;
131         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
132         struct tcm_vhost_nexus *tpg_nexus;
133         /* Pointer back to tcm_vhost_tport */
134         struct tcm_vhost_tport *tport;
135         /* Returned by tcm_vhost_make_tpg() */
136         struct se_portal_group se_tpg;
137         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
138         struct vhost_scsi *vhost_scsi;
139 };
140
141 struct tcm_vhost_tport {
142         /* SCSI protocol the tport is providing */
143         u8 tport_proto_id;
144         /* Binary World Wide unique Port Name for Vhost Target port */
145         u64 tport_wwpn;
146         /* ASCII formatted WWPN for Vhost Target port */
147         char tport_name[TCM_VHOST_NAMELEN];
148         /* Returned by tcm_vhost_make_tport() */
149         struct se_wwn tport_wwn;
150 };
151
152 struct tcm_vhost_evt {
153         /* event to be sent to guest */
154         struct virtio_scsi_event event;
155         /* event list, serviced from vhost worker thread */
156         struct llist_node list;
157 };
158
159 enum {
160         VHOST_SCSI_VQ_CTL = 0,
161         VHOST_SCSI_VQ_EVT = 1,
162         VHOST_SCSI_VQ_IO = 2,
163 };
164
165 enum {
166         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
167 };
168
169 #define VHOST_SCSI_MAX_TARGET   256
170 #define VHOST_SCSI_MAX_VQ       128
171 #define VHOST_SCSI_MAX_EVENT    128
172
173 struct vhost_scsi_virtqueue {
174         struct vhost_virtqueue vq;
175         /*
176          * Reference counting for inflight reqs, used for flush operation. At
177          * each time, one reference tracks new commands submitted, while we
178          * wait for another one to reach 0.
179          */
180         struct vhost_scsi_inflight inflights[2];
181         /*
182          * Indicate current inflight in use, protected by vq->mutex.
183          * Writers must also take dev mutex and flush under it.
184          */
185         int inflight_idx;
186 };
187
188 struct vhost_scsi {
189         /* Protected by vhost_scsi->dev.mutex */
190         struct tcm_vhost_tpg **vs_tpg;
191         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
192
193         struct vhost_dev dev;
194         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
195
196         struct vhost_work vs_completion_work; /* cmd completion work item */
197         struct llist_head vs_completion_list; /* cmd completion queue */
198
199         struct vhost_work vs_event_work; /* evt injection work item */
200         struct llist_head vs_event_list; /* evt injection queue */
201
202         bool vs_events_missed; /* any missed events, protected by vq->mutex */
203         int vs_events_nr; /* num of pending events, protected by vq->mutex */
204 };
205
206 /* Local pointer to allocated TCM configfs fabric module */
207 static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
208
209 static struct workqueue_struct *tcm_vhost_workqueue;
210
211 /* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
212 static DEFINE_MUTEX(tcm_vhost_mutex);
213 static LIST_HEAD(tcm_vhost_list);
214
215 static int iov_num_pages(struct iovec *iov)
216 {
217         return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
218                ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
219 }
220
221 void tcm_vhost_done_inflight(struct kref *kref)
222 {
223         struct vhost_scsi_inflight *inflight;
224
225         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
226         complete(&inflight->comp);
227 }
228
229 static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
230                                     struct vhost_scsi_inflight *old_inflight[])
231 {
232         struct vhost_scsi_inflight *new_inflight;
233         struct vhost_virtqueue *vq;
234         int idx, i;
235
236         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
237                 vq = &vs->vqs[i].vq;
238
239                 mutex_lock(&vq->mutex);
240
241                 /* store old infight */
242                 idx = vs->vqs[i].inflight_idx;
243                 if (old_inflight)
244                         old_inflight[i] = &vs->vqs[i].inflights[idx];
245
246                 /* setup new infight */
247                 vs->vqs[i].inflight_idx = idx ^ 1;
248                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
249                 kref_init(&new_inflight->kref);
250                 init_completion(&new_inflight->comp);
251
252                 mutex_unlock(&vq->mutex);
253         }
254 }
255
256 static struct vhost_scsi_inflight *
257 tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
258 {
259         struct vhost_scsi_inflight *inflight;
260         struct vhost_scsi_virtqueue *svq;
261
262         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
263         inflight = &svq->inflights[svq->inflight_idx];
264         kref_get(&inflight->kref);
265
266         return inflight;
267 }
268
269 static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
270 {
271         kref_put(&inflight->kref, tcm_vhost_done_inflight);
272 }
273
274 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
275 {
276         return 1;
277 }
278
279 static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
280 {
281         return 0;
282 }
283
284 static char *tcm_vhost_get_fabric_name(void)
285 {
286         return "vhost";
287 }
288
289 static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
290 {
291         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
292                                 struct tcm_vhost_tpg, se_tpg);
293         struct tcm_vhost_tport *tport = tpg->tport;
294
295         switch (tport->tport_proto_id) {
296         case SCSI_PROTOCOL_SAS:
297                 return sas_get_fabric_proto_ident(se_tpg);
298         case SCSI_PROTOCOL_FCP:
299                 return fc_get_fabric_proto_ident(se_tpg);
300         case SCSI_PROTOCOL_ISCSI:
301                 return iscsi_get_fabric_proto_ident(se_tpg);
302         default:
303                 pr_err("Unknown tport_proto_id: 0x%02x, using"
304                         " SAS emulation\n", tport->tport_proto_id);
305                 break;
306         }
307
308         return sas_get_fabric_proto_ident(se_tpg);
309 }
310
311 static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
312 {
313         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
314                                 struct tcm_vhost_tpg, se_tpg);
315         struct tcm_vhost_tport *tport = tpg->tport;
316
317         return &tport->tport_name[0];
318 }
319
320 static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
321 {
322         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
323                                 struct tcm_vhost_tpg, se_tpg);
324         return tpg->tport_tpgt;
325 }
326
327 static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
328 {
329         return 1;
330 }
331
332 static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
333         struct se_node_acl *se_nacl,
334         struct t10_pr_registration *pr_reg,
335         int *format_code,
336         unsigned char *buf)
337 {
338         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
339                                 struct tcm_vhost_tpg, se_tpg);
340         struct tcm_vhost_tport *tport = tpg->tport;
341
342         switch (tport->tport_proto_id) {
343         case SCSI_PROTOCOL_SAS:
344                 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
345                                         format_code, buf);
346         case SCSI_PROTOCOL_FCP:
347                 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
348                                         format_code, buf);
349         case SCSI_PROTOCOL_ISCSI:
350                 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
351                                         format_code, buf);
352         default:
353                 pr_err("Unknown tport_proto_id: 0x%02x, using"
354                         " SAS emulation\n", tport->tport_proto_id);
355                 break;
356         }
357
358         return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
359                         format_code, buf);
360 }
361
362 static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
363         struct se_node_acl *se_nacl,
364         struct t10_pr_registration *pr_reg,
365         int *format_code)
366 {
367         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
368                                 struct tcm_vhost_tpg, se_tpg);
369         struct tcm_vhost_tport *tport = tpg->tport;
370
371         switch (tport->tport_proto_id) {
372         case SCSI_PROTOCOL_SAS:
373                 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
374                                         format_code);
375         case SCSI_PROTOCOL_FCP:
376                 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
377                                         format_code);
378         case SCSI_PROTOCOL_ISCSI:
379                 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
380                                         format_code);
381         default:
382                 pr_err("Unknown tport_proto_id: 0x%02x, using"
383                         " SAS emulation\n", tport->tport_proto_id);
384                 break;
385         }
386
387         return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
388                         format_code);
389 }
390
391 static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
392         const char *buf,
393         u32 *out_tid_len,
394         char **port_nexus_ptr)
395 {
396         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
397                                 struct tcm_vhost_tpg, se_tpg);
398         struct tcm_vhost_tport *tport = tpg->tport;
399
400         switch (tport->tport_proto_id) {
401         case SCSI_PROTOCOL_SAS:
402                 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
403                                         port_nexus_ptr);
404         case SCSI_PROTOCOL_FCP:
405                 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
406                                         port_nexus_ptr);
407         case SCSI_PROTOCOL_ISCSI:
408                 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
409                                         port_nexus_ptr);
410         default:
411                 pr_err("Unknown tport_proto_id: 0x%02x, using"
412                         " SAS emulation\n", tport->tport_proto_id);
413                 break;
414         }
415
416         return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
417                         port_nexus_ptr);
418 }
419
420 static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
421         struct se_portal_group *se_tpg)
422 {
423         struct tcm_vhost_nacl *nacl;
424
425         nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
426         if (!nacl) {
427                 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
428                 return NULL;
429         }
430
431         return &nacl->se_node_acl;
432 }
433
434 static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
435         struct se_node_acl *se_nacl)
436 {
437         struct tcm_vhost_nacl *nacl = container_of(se_nacl,
438                         struct tcm_vhost_nacl, se_node_acl);
439         kfree(nacl);
440 }
441
442 static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
443 {
444         return 1;
445 }
446
447 static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
448 {
449         return;
450 }
451
452 static int tcm_vhost_shutdown_session(struct se_session *se_sess)
453 {
454         return 0;
455 }
456
457 static void tcm_vhost_close_session(struct se_session *se_sess)
458 {
459         return;
460 }
461
462 static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
463 {
464         return 0;
465 }
466
467 static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
468 {
469         /* Go ahead and process the write immediately */
470         target_execute_cmd(se_cmd);
471         return 0;
472 }
473
474 static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
475 {
476         return 0;
477 }
478
479 static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
480 {
481         return;
482 }
483
484 static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
485 {
486         return 0;
487 }
488
489 static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
490 {
491         return 0;
492 }
493
494 static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
495 {
496         struct vhost_scsi *vs = tv_cmd->tvc_vhost;
497
498         llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
499
500         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
501 }
502
503 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
504 {
505         struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
506                                 struct tcm_vhost_cmd, tvc_se_cmd);
507         vhost_scsi_complete_cmd(tv_cmd);
508         return 0;
509 }
510
511 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
512 {
513         struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
514                                 struct tcm_vhost_cmd, tvc_se_cmd);
515         vhost_scsi_complete_cmd(tv_cmd);
516         return 0;
517 }
518
519 static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
520 {
521         return 0;
522 }
523
524 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
525 {
526         vs->vs_events_nr--;
527         kfree(evt);
528 }
529
530 static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
531         u32 event, u32 reason)
532 {
533         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
534         struct tcm_vhost_evt *evt;
535
536         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
537                 vs->vs_events_missed = true;
538                 return NULL;
539         }
540
541         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
542         if (!evt) {
543                 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
544                 vs->vs_events_missed = true;
545                 return NULL;
546         }
547
548         evt->event.event = event;
549         evt->event.reason = reason;
550         vs->vs_events_nr++;
551
552         return evt;
553 }
554
555 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
556 {
557         struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
558
559         /* TODO locking against target/backend threads? */
560         transport_generic_free_cmd(se_cmd, 1);
561
562         if (tv_cmd->tvc_sgl_count) {
563                 u32 i;
564                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
565                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
566
567                 kfree(tv_cmd->tvc_sgl);
568         }
569
570         tcm_vhost_put_inflight(tv_cmd->inflight);
571
572         kfree(tv_cmd);
573 }
574
575 static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
576         struct tcm_vhost_evt *evt)
577 {
578         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
579         struct virtio_scsi_event *event = &evt->event;
580         struct virtio_scsi_event __user *eventp;
581         unsigned out, in;
582         int head, ret;
583
584         if (!vq->private_data) {
585                 vs->vs_events_missed = true;
586                 return;
587         }
588
589 again:
590         vhost_disable_notify(&vs->dev, vq);
591         head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
592                         ARRAY_SIZE(vq->iov), &out, &in,
593                         NULL, NULL);
594         if (head < 0) {
595                 vs->vs_events_missed = true;
596                 return;
597         }
598         if (head == vq->num) {
599                 if (vhost_enable_notify(&vs->dev, vq))
600                         goto again;
601                 vs->vs_events_missed = true;
602                 return;
603         }
604
605         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
606                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
607                                 vq->iov[out].iov_len);
608                 vs->vs_events_missed = true;
609                 return;
610         }
611
612         if (vs->vs_events_missed) {
613                 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
614                 vs->vs_events_missed = false;
615         }
616
617         eventp = vq->iov[out].iov_base;
618         ret = __copy_to_user(eventp, event, sizeof(*event));
619         if (!ret)
620                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
621         else
622                 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
623 }
624
625 static void tcm_vhost_evt_work(struct vhost_work *work)
626 {
627         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
628                                         vs_event_work);
629         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
630         struct tcm_vhost_evt *evt;
631         struct llist_node *llnode;
632
633         mutex_lock(&vq->mutex);
634         llnode = llist_del_all(&vs->vs_event_list);
635         while (llnode) {
636                 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
637                 llnode = llist_next(llnode);
638                 tcm_vhost_do_evt_work(vs, evt);
639                 tcm_vhost_free_evt(vs, evt);
640         }
641         mutex_unlock(&vq->mutex);
642 }
643
644 /* Fill in status and signal that we are done processing this command
645  *
646  * This is scheduled in the vhost work queue so we are called with the owner
647  * process mm and can access the vring.
648  */
649 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
650 {
651         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
652                                         vs_completion_work);
653         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
654         struct virtio_scsi_cmd_resp v_rsp;
655         struct tcm_vhost_cmd *tv_cmd;
656         struct llist_node *llnode;
657         struct se_cmd *se_cmd;
658         int ret, vq;
659
660         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
661         llnode = llist_del_all(&vs->vs_completion_list);
662         while (llnode) {
663                 tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
664                                      tvc_completion_list);
665                 llnode = llist_next(llnode);
666                 se_cmd = &tv_cmd->tvc_se_cmd;
667
668                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
669                         tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
670
671                 memset(&v_rsp, 0, sizeof(v_rsp));
672                 v_rsp.resid = se_cmd->residual_count;
673                 /* TODO is status_qualifier field needed? */
674                 v_rsp.status = se_cmd->scsi_status;
675                 v_rsp.sense_len = se_cmd->scsi_sense_length;
676                 memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
677                        v_rsp.sense_len);
678                 ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
679                 if (likely(ret == 0)) {
680                         struct vhost_scsi_virtqueue *q;
681                         vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
682                         q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
683                         vq = q - vs->vqs;
684                         __set_bit(vq, signal);
685                 } else
686                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
687
688                 vhost_scsi_free_cmd(tv_cmd);
689         }
690
691         vq = -1;
692         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
693                 < VHOST_SCSI_MAX_VQ)
694                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
695 }
696
697 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
698         struct vhost_virtqueue *vq,
699         struct tcm_vhost_tpg *tv_tpg,
700         struct virtio_scsi_cmd_req *v_req,
701         u32 exp_data_len,
702         int data_direction)
703 {
704         struct tcm_vhost_cmd *tv_cmd;
705         struct tcm_vhost_nexus *tv_nexus;
706
707         tv_nexus = tv_tpg->tpg_nexus;
708         if (!tv_nexus) {
709                 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
710                 return ERR_PTR(-EIO);
711         }
712
713         tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
714         if (!tv_cmd) {
715                 pr_err("Unable to allocate struct tcm_vhost_cmd\n");
716                 return ERR_PTR(-ENOMEM);
717         }
718         tv_cmd->tvc_tag = v_req->tag;
719         tv_cmd->tvc_task_attr = v_req->task_attr;
720         tv_cmd->tvc_exp_data_len = exp_data_len;
721         tv_cmd->tvc_data_direction = data_direction;
722         tv_cmd->tvc_nexus = tv_nexus;
723         tv_cmd->inflight = tcm_vhost_get_inflight(vq);
724
725         return tv_cmd;
726 }
727
728 /*
729  * Map a user memory range into a scatterlist
730  *
731  * Returns the number of scatterlist entries used or -errno on error.
732  */
733 static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
734         unsigned int sgl_count, struct iovec *iov, int write)
735 {
736         unsigned int npages = 0, pages_nr, offset, nbytes;
737         struct scatterlist *sg = sgl;
738         void __user *ptr = iov->iov_base;
739         size_t len = iov->iov_len;
740         struct page **pages;
741         int ret, i;
742
743         pages_nr = iov_num_pages(iov);
744         if (pages_nr > sgl_count)
745                 return -ENOBUFS;
746
747         pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
748         if (!pages)
749                 return -ENOMEM;
750
751         ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
752         /* No pages were pinned */
753         if (ret < 0)
754                 goto out;
755         /* Less pages pinned than wanted */
756         if (ret != pages_nr) {
757                 for (i = 0; i < ret; i++)
758                         put_page(pages[i]);
759                 ret = -EFAULT;
760                 goto out;
761         }
762
763         while (len > 0) {
764                 offset = (uintptr_t)ptr & ~PAGE_MASK;
765                 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
766                 sg_set_page(sg, pages[npages], nbytes, offset);
767                 ptr += nbytes;
768                 len -= nbytes;
769                 sg++;
770                 npages++;
771         }
772
773 out:
774         kfree(pages);
775         return ret;
776 }
777
778 static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
779         struct iovec *iov, unsigned int niov, int write)
780 {
781         int ret;
782         unsigned int i;
783         u32 sgl_count;
784         struct scatterlist *sg;
785
786         /*
787          * Find out how long sglist needs to be
788          */
789         sgl_count = 0;
790         for (i = 0; i < niov; i++)
791                 sgl_count += iov_num_pages(&iov[i]);
792
793         /* TODO overflow checking */
794
795         sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
796         if (!sg)
797                 return -ENOMEM;
798         pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
799                sg, sgl_count, !sg);
800         sg_init_table(sg, sgl_count);
801
802         tv_cmd->tvc_sgl = sg;
803         tv_cmd->tvc_sgl_count = sgl_count;
804
805         pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
806         for (i = 0; i < niov; i++) {
807                 ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
808                 if (ret < 0) {
809                         for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
810                                 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
811                         kfree(tv_cmd->tvc_sgl);
812                         tv_cmd->tvc_sgl = NULL;
813                         tv_cmd->tvc_sgl_count = 0;
814                         return ret;
815                 }
816
817                 sg += ret;
818                 sgl_count -= ret;
819         }
820         return 0;
821 }
822
823 static int vhost_scsi_to_tcm_attr(int attr)
824 {
825         switch (attr) {
826         case VIRTIO_SCSI_S_SIMPLE:
827                 return MSG_SIMPLE_TAG;
828         case VIRTIO_SCSI_S_ORDERED:
829                 return MSG_ORDERED_TAG;
830         case VIRTIO_SCSI_S_HEAD:
831                 return MSG_HEAD_TAG;
832         case VIRTIO_SCSI_S_ACA:
833                 return MSG_ACA_TAG;
834         default:
835                 break;
836         }
837         return MSG_SIMPLE_TAG;
838 }
839
840 static void tcm_vhost_submission_work(struct work_struct *work)
841 {
842         struct tcm_vhost_cmd *tv_cmd =
843                 container_of(work, struct tcm_vhost_cmd, work);
844         struct tcm_vhost_nexus *tv_nexus;
845         struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
846         struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
847         int rc, sg_no_bidi = 0;
848
849         if (tv_cmd->tvc_sgl_count) {
850                 sg_ptr = tv_cmd->tvc_sgl;
851 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
852 #if 0
853                 if (se_cmd->se_cmd_flags & SCF_BIDI) {
854                         sg_bidi_ptr = NULL;
855                         sg_no_bidi = 0;
856                 }
857 #endif
858         } else {
859                 sg_ptr = NULL;
860         }
861         tv_nexus = tv_cmd->tvc_nexus;
862
863         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
864                         tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
865                         tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
866                         vhost_scsi_to_tcm_attr(tv_cmd->tvc_task_attr),
867                         tv_cmd->tvc_data_direction, 0, sg_ptr,
868                         tv_cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi);
869         if (rc < 0) {
870                 transport_send_check_condition_and_sense(se_cmd,
871                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
872                 transport_generic_free_cmd(se_cmd, 0);
873         }
874 }
875
876 static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
877         struct vhost_virtqueue *vq, int head, unsigned out)
878 {
879         struct virtio_scsi_cmd_resp __user *resp;
880         struct virtio_scsi_cmd_resp rsp;
881         int ret;
882
883         memset(&rsp, 0, sizeof(rsp));
884         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
885         resp = vq->iov[out].iov_base;
886         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
887         if (!ret)
888                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
889         else
890                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
891 }
892
893 static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
894         struct vhost_virtqueue *vq)
895 {
896         struct tcm_vhost_tpg **vs_tpg;
897         struct virtio_scsi_cmd_req v_req;
898         struct tcm_vhost_tpg *tv_tpg;
899         struct tcm_vhost_cmd *tv_cmd;
900         u32 exp_data_len, data_first, data_num, data_direction;
901         unsigned out, in, i;
902         int head, ret;
903         u8 target;
904
905         /*
906          * We can handle the vq only after the endpoint is setup by calling the
907          * VHOST_SCSI_SET_ENDPOINT ioctl.
908          *
909          * TODO: Check that we are running from vhost_worker which acts
910          * as read-side critical section for vhost kind of RCU.
911          * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
912          */
913         vs_tpg = rcu_dereference_check(vq->private_data, 1);
914         if (!vs_tpg)
915                 return;
916
917         mutex_lock(&vq->mutex);
918         vhost_disable_notify(&vs->dev, vq);
919
920         for (;;) {
921                 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
922                                         ARRAY_SIZE(vq->iov), &out, &in,
923                                         NULL, NULL);
924                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
925                                         head, out, in);
926                 /* On error, stop handling until the next kick. */
927                 if (unlikely(head < 0))
928                         break;
929                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
930                 if (head == vq->num) {
931                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
932                                 vhost_disable_notify(&vs->dev, vq);
933                                 continue;
934                         }
935                         break;
936                 }
937
938 /* FIXME: BIDI operation */
939                 if (out == 1 && in == 1) {
940                         data_direction = DMA_NONE;
941                         data_first = 0;
942                         data_num = 0;
943                 } else if (out == 1 && in > 1) {
944                         data_direction = DMA_FROM_DEVICE;
945                         data_first = out + 1;
946                         data_num = in - 1;
947                 } else if (out > 1 && in == 1) {
948                         data_direction = DMA_TO_DEVICE;
949                         data_first = 1;
950                         data_num = out - 1;
951                 } else {
952                         vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
953                                         out, in);
954                         break;
955                 }
956
957                 /*
958                  * Check for a sane resp buffer so we can report errors to
959                  * the guest.
960                  */
961                 if (unlikely(vq->iov[out].iov_len !=
962                                         sizeof(struct virtio_scsi_cmd_resp))) {
963                         vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
964                                 " bytes\n", vq->iov[out].iov_len);
965                         break;
966                 }
967
968                 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
969                         vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
970                                 " bytes\n", vq->iov[0].iov_len);
971                         break;
972                 }
973                 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
974                         " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
975                 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
976                                 sizeof(v_req));
977                 if (unlikely(ret)) {
978                         vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
979                         break;
980                 }
981
982                 /* Extract the tpgt */
983                 target = v_req.lun[1];
984                 tv_tpg = ACCESS_ONCE(vs_tpg[target]);
985
986                 /* Target does not exist, fail the request */
987                 if (unlikely(!tv_tpg)) {
988                         vhost_scsi_send_bad_target(vs, vq, head, out);
989                         continue;
990                 }
991
992                 exp_data_len = 0;
993                 for (i = 0; i < data_num; i++)
994                         exp_data_len += vq->iov[data_first + i].iov_len;
995
996                 tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
997                                         exp_data_len, data_direction);
998                 if (IS_ERR(tv_cmd)) {
999                         vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
1000                                         PTR_ERR(tv_cmd));
1001                         goto err_cmd;
1002                 }
1003                 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
1004                         ": %d\n", tv_cmd, exp_data_len, data_direction);
1005
1006                 tv_cmd->tvc_vhost = vs;
1007                 tv_cmd->tvc_vq = vq;
1008                 tv_cmd->tvc_resp = vq->iov[out].iov_base;
1009
1010                 /*
1011                  * Copy in the recieved CDB descriptor into tv_cmd->tvc_cdb
1012                  * that will be used by tcm_vhost_new_cmd_map() and down into
1013                  * target_setup_cmd_from_cdb()
1014                  */
1015                 memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
1016                 /*
1017                  * Check that the recieved CDB size does not exceeded our
1018                  * hardcoded max for tcm_vhost
1019                  */
1020                 /* TODO what if cdb was too small for varlen cdb header? */
1021                 if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
1022                                         TCM_VHOST_MAX_CDB_SIZE)) {
1023                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1024                                 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1025                                 scsi_command_size(tv_cmd->tvc_cdb),
1026                                 TCM_VHOST_MAX_CDB_SIZE);
1027                         goto err_free;
1028                 }
1029                 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1030
1031                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1032                         tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
1033
1034                 if (data_direction != DMA_NONE) {
1035                         ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
1036                                         &vq->iov[data_first], data_num,
1037                                         data_direction == DMA_FROM_DEVICE);
1038                         if (unlikely(ret)) {
1039                                 vq_err(vq, "Failed to map iov to sgl\n");
1040                                 goto err_free;
1041                         }
1042                 }
1043
1044                 /*
1045                  * Save the descriptor from vhost_get_vq_desc() to be used to
1046                  * complete the virtio-scsi request in TCM callback context via
1047                  * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
1048                  */
1049                 tv_cmd->tvc_vq_desc = head;
1050                 /*
1051                  * Dispatch tv_cmd descriptor for cmwq execution in process
1052                  * context provided by tcm_vhost_workqueue.  This also ensures
1053                  * tv_cmd is executed on the same kworker CPU as this vhost
1054                  * thread to gain positive L2 cache locality effects..
1055                  */
1056                 INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
1057                 queue_work(tcm_vhost_workqueue, &tv_cmd->work);
1058         }
1059
1060         mutex_unlock(&vq->mutex);
1061         return;
1062
1063 err_free:
1064         vhost_scsi_free_cmd(tv_cmd);
1065 err_cmd:
1066         vhost_scsi_send_bad_target(vs, vq, head, out);
1067         mutex_unlock(&vq->mutex);
1068 }
1069
1070 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1071 {
1072         pr_debug("%s: The handling func for control queue.\n", __func__);
1073 }
1074
1075 static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
1076         struct se_lun *lun, u32 event, u32 reason)
1077 {
1078         struct tcm_vhost_evt *evt;
1079
1080         evt = tcm_vhost_allocate_evt(vs, event, reason);
1081         if (!evt)
1082                 return;
1083
1084         if (tpg && lun) {
1085                 /* TODO: share lun setup code with virtio-scsi.ko */
1086                 /*
1087                  * Note: evt->event is zeroed when we allocate it and
1088                  * lun[4-7] need to be zero according to virtio-scsi spec.
1089                  */
1090                 evt->event.lun[0] = 0x01;
1091                 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1092                 if (lun->unpacked_lun >= 256)
1093                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
1094                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1095         }
1096
1097         llist_add(&evt->list, &vs->vs_event_list);
1098         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1099 }
1100
1101 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1102 {
1103         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1104                                                 poll.work);
1105         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1106
1107         mutex_lock(&vq->mutex);
1108         if (!vq->private_data)
1109                 goto out;
1110
1111         if (vs->vs_events_missed)
1112                 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1113 out:
1114         mutex_unlock(&vq->mutex);
1115 }
1116
1117 static void vhost_scsi_handle_kick(struct vhost_work *work)
1118 {
1119         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1120                                                 poll.work);
1121         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1122
1123         vhost_scsi_handle_vq(vs, vq);
1124 }
1125
1126 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1127 {
1128         vhost_poll_flush(&vs->vqs[index].vq.poll);
1129 }
1130
1131 /* Callers must hold dev mutex */
1132 static void vhost_scsi_flush(struct vhost_scsi *vs)
1133 {
1134         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1135         int i;
1136
1137         /* Init new inflight and remember the old inflight */
1138         tcm_vhost_init_inflight(vs, old_inflight);
1139
1140         /*
1141          * The inflight->kref was initialized to 1. We decrement it here to
1142          * indicate the start of the flush operation so that it will reach 0
1143          * when all the reqs are finished.
1144          */
1145         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1146                 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1147
1148         /* Flush both the vhost poll and vhost work */
1149         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1150                 vhost_scsi_flush_vq(vs, i);
1151         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1152         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1153
1154         /* Wait for all reqs issued before the flush to be finished */
1155         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1156                 wait_for_completion(&old_inflight[i]->comp);
1157 }
1158
1159 /*
1160  * Called from vhost_scsi_ioctl() context to walk the list of available
1161  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
1162  *
1163  *  The lock nesting rule is:
1164  *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1165  */
1166 static int vhost_scsi_set_endpoint(
1167         struct vhost_scsi *vs,
1168         struct vhost_scsi_target *t)
1169 {
1170         struct se_portal_group *se_tpg;
1171         struct tcm_vhost_tport *tv_tport;
1172         struct tcm_vhost_tpg *tv_tpg;
1173         struct tcm_vhost_tpg **vs_tpg;
1174         struct vhost_virtqueue *vq;
1175         int index, ret, i, len;
1176         bool match = false;
1177
1178         mutex_lock(&tcm_vhost_mutex);
1179         mutex_lock(&vs->dev.mutex);
1180
1181         /* Verify that ring has been setup correctly. */
1182         for (index = 0; index < vs->dev.nvqs; ++index) {
1183                 /* Verify that ring has been setup correctly. */
1184                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1185                         ret = -EFAULT;
1186                         goto out;
1187                 }
1188         }
1189
1190         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1191         vs_tpg = kzalloc(len, GFP_KERNEL);
1192         if (!vs_tpg) {
1193                 ret = -ENOMEM;
1194                 goto out;
1195         }
1196         if (vs->vs_tpg)
1197                 memcpy(vs_tpg, vs->vs_tpg, len);
1198
1199         list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
1200                 mutex_lock(&tv_tpg->tv_tpg_mutex);
1201                 if (!tv_tpg->tpg_nexus) {
1202                         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1203                         continue;
1204                 }
1205                 if (tv_tpg->tv_tpg_vhost_count != 0) {
1206                         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1207                         continue;
1208                 }
1209                 tv_tport = tv_tpg->tport;
1210
1211                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1212                         if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
1213                                 kfree(vs_tpg);
1214                                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1215                                 ret = -EEXIST;
1216                                 goto out;
1217                         }
1218                         /*
1219                          * In order to ensure individual vhost-scsi configfs
1220                          * groups cannot be removed while in use by vhost ioctl,
1221                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1222                          * dependency now.
1223                          */
1224                         se_tpg = &tv_tpg->se_tpg;
1225                         ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
1226                                                    &se_tpg->tpg_group.cg_item);
1227                         if (ret) {
1228                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1229                                 kfree(vs_tpg);
1230                                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1231                                 goto out;
1232                         }
1233                         tv_tpg->tv_tpg_vhost_count++;
1234                         tv_tpg->vhost_scsi = vs;
1235                         vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
1236                         smp_mb__after_atomic_inc();
1237                         match = true;
1238                 }
1239                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1240         }
1241
1242         if (match) {
1243                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1244                        sizeof(vs->vs_vhost_wwpn));
1245                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1246                         vq = &vs->vqs[i].vq;
1247                         /* Flushing the vhost_work acts as synchronize_rcu */
1248                         mutex_lock(&vq->mutex);
1249                         rcu_assign_pointer(vq->private_data, vs_tpg);
1250                         vhost_init_used(vq);
1251                         mutex_unlock(&vq->mutex);
1252                 }
1253                 ret = 0;
1254         } else {
1255                 ret = -EEXIST;
1256         }
1257
1258         /*
1259          * Act as synchronize_rcu to make sure access to
1260          * old vs->vs_tpg is finished.
1261          */
1262         vhost_scsi_flush(vs);
1263         kfree(vs->vs_tpg);
1264         vs->vs_tpg = vs_tpg;
1265
1266 out:
1267         mutex_unlock(&vs->dev.mutex);
1268         mutex_unlock(&tcm_vhost_mutex);
1269         return ret;
1270 }
1271
1272 static int vhost_scsi_clear_endpoint(
1273         struct vhost_scsi *vs,
1274         struct vhost_scsi_target *t)
1275 {
1276         struct se_portal_group *se_tpg;
1277         struct tcm_vhost_tport *tv_tport;
1278         struct tcm_vhost_tpg *tv_tpg;
1279         struct vhost_virtqueue *vq;
1280         bool match = false;
1281         int index, ret, i;
1282         u8 target;
1283
1284         mutex_lock(&tcm_vhost_mutex);
1285         mutex_lock(&vs->dev.mutex);
1286         /* Verify that ring has been setup correctly. */
1287         for (index = 0; index < vs->dev.nvqs; ++index) {
1288                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1289                         ret = -EFAULT;
1290                         goto err_dev;
1291                 }
1292         }
1293
1294         if (!vs->vs_tpg) {
1295                 ret = 0;
1296                 goto err_dev;
1297         }
1298
1299         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1300                 target = i;
1301                 tv_tpg = vs->vs_tpg[target];
1302                 if (!tv_tpg)
1303                         continue;
1304
1305                 mutex_lock(&tv_tpg->tv_tpg_mutex);
1306                 tv_tport = tv_tpg->tport;
1307                 if (!tv_tport) {
1308                         ret = -ENODEV;
1309                         goto err_tpg;
1310                 }
1311
1312                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1313                         pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
1314                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1315                                 tv_tport->tport_name, tv_tpg->tport_tpgt,
1316                                 t->vhost_wwpn, t->vhost_tpgt);
1317                         ret = -EINVAL;
1318                         goto err_tpg;
1319                 }
1320                 tv_tpg->tv_tpg_vhost_count--;
1321                 tv_tpg->vhost_scsi = NULL;
1322                 vs->vs_tpg[target] = NULL;
1323                 match = true;
1324                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1325                 /*
1326                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1327                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1328                  */
1329                 se_tpg = &tv_tpg->se_tpg;
1330                 configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
1331                                        &se_tpg->tpg_group.cg_item);
1332         }
1333         if (match) {
1334                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1335                         vq = &vs->vqs[i].vq;
1336                         /* Flushing the vhost_work acts as synchronize_rcu */
1337                         mutex_lock(&vq->mutex);
1338                         rcu_assign_pointer(vq->private_data, NULL);
1339                         mutex_unlock(&vq->mutex);
1340                 }
1341         }
1342         /*
1343          * Act as synchronize_rcu to make sure access to
1344          * old vs->vs_tpg is finished.
1345          */
1346         vhost_scsi_flush(vs);
1347         kfree(vs->vs_tpg);
1348         vs->vs_tpg = NULL;
1349         WARN_ON(vs->vs_events_nr);
1350         mutex_unlock(&vs->dev.mutex);
1351         mutex_unlock(&tcm_vhost_mutex);
1352         return 0;
1353
1354 err_tpg:
1355         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1356 err_dev:
1357         mutex_unlock(&vs->dev.mutex);
1358         mutex_unlock(&tcm_vhost_mutex);
1359         return ret;
1360 }
1361
1362 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1363 {
1364         if (features & ~VHOST_SCSI_FEATURES)
1365                 return -EOPNOTSUPP;
1366
1367         mutex_lock(&vs->dev.mutex);
1368         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1369             !vhost_log_access_ok(&vs->dev)) {
1370                 mutex_unlock(&vs->dev.mutex);
1371                 return -EFAULT;
1372         }
1373         vs->dev.acked_features = features;
1374         smp_wmb();
1375         vhost_scsi_flush(vs);
1376         mutex_unlock(&vs->dev.mutex);
1377         return 0;
1378 }
1379
1380 static int vhost_scsi_open(struct inode *inode, struct file *f)
1381 {
1382         struct vhost_scsi *s;
1383         struct vhost_virtqueue **vqs;
1384         int r, i;
1385
1386         s = kzalloc(sizeof(*s), GFP_KERNEL);
1387         if (!s)
1388                 return -ENOMEM;
1389
1390         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1391         if (!vqs) {
1392                 kfree(s);
1393                 return -ENOMEM;
1394         }
1395
1396         vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
1397         vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
1398
1399         s->vs_events_nr = 0;
1400         s->vs_events_missed = false;
1401
1402         vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
1403         vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
1404         s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1405         s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1406         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1407                 vqs[i] = &s->vqs[i].vq;
1408                 s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1409         }
1410         r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
1411
1412         tcm_vhost_init_inflight(s, NULL);
1413
1414         if (r < 0) {
1415                 kfree(vqs);
1416                 kfree(s);
1417                 return r;
1418         }
1419
1420         f->private_data = s;
1421         return 0;
1422 }
1423
1424 static int vhost_scsi_release(struct inode *inode, struct file *f)
1425 {
1426         struct vhost_scsi *s = f->private_data;
1427         struct vhost_scsi_target t;
1428
1429         mutex_lock(&s->dev.mutex);
1430         memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1431         mutex_unlock(&s->dev.mutex);
1432         vhost_scsi_clear_endpoint(s, &t);
1433         vhost_dev_stop(&s->dev);
1434         vhost_dev_cleanup(&s->dev, false);
1435         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1436         vhost_scsi_flush(s);
1437         kfree(s->dev.vqs);
1438         kfree(s);
1439         return 0;
1440 }
1441
1442 static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
1443                                 unsigned long arg)
1444 {
1445         struct vhost_scsi *vs = f->private_data;
1446         struct vhost_scsi_target backend;
1447         void __user *argp = (void __user *)arg;
1448         u64 __user *featurep = argp;
1449         u32 __user *eventsp = argp;
1450         u32 events_missed;
1451         u64 features;
1452         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1453         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1454
1455         switch (ioctl) {
1456         case VHOST_SCSI_SET_ENDPOINT:
1457                 if (copy_from_user(&backend, argp, sizeof backend))
1458                         return -EFAULT;
1459                 if (backend.reserved != 0)
1460                         return -EOPNOTSUPP;
1461
1462                 return vhost_scsi_set_endpoint(vs, &backend);
1463         case VHOST_SCSI_CLEAR_ENDPOINT:
1464                 if (copy_from_user(&backend, argp, sizeof backend))
1465                         return -EFAULT;
1466                 if (backend.reserved != 0)
1467                         return -EOPNOTSUPP;
1468
1469                 return vhost_scsi_clear_endpoint(vs, &backend);
1470         case VHOST_SCSI_GET_ABI_VERSION:
1471                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1472                         return -EFAULT;
1473                 return 0;
1474         case VHOST_SCSI_SET_EVENTS_MISSED:
1475                 if (get_user(events_missed, eventsp))
1476                         return -EFAULT;
1477                 mutex_lock(&vq->mutex);
1478                 vs->vs_events_missed = events_missed;
1479                 mutex_unlock(&vq->mutex);
1480                 return 0;
1481         case VHOST_SCSI_GET_EVENTS_MISSED:
1482                 mutex_lock(&vq->mutex);
1483                 events_missed = vs->vs_events_missed;
1484                 mutex_unlock(&vq->mutex);
1485                 if (put_user(events_missed, eventsp))
1486                         return -EFAULT;
1487                 return 0;
1488         case VHOST_GET_FEATURES:
1489                 features = VHOST_SCSI_FEATURES;
1490                 if (copy_to_user(featurep, &features, sizeof features))
1491                         return -EFAULT;
1492                 return 0;
1493         case VHOST_SET_FEATURES:
1494                 if (copy_from_user(&features, featurep, sizeof features))
1495                         return -EFAULT;
1496                 return vhost_scsi_set_features(vs, features);
1497         default:
1498                 mutex_lock(&vs->dev.mutex);
1499                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1500                 /* TODO: flush backend after dev ioctl. */
1501                 if (r == -ENOIOCTLCMD)
1502                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1503                 mutex_unlock(&vs->dev.mutex);
1504                 return r;
1505         }
1506 }
1507
1508 #ifdef CONFIG_COMPAT
1509 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1510                                 unsigned long arg)
1511 {
1512         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1513 }
1514 #endif
1515
1516 static const struct file_operations vhost_scsi_fops = {
1517         .owner          = THIS_MODULE,
1518         .release        = vhost_scsi_release,
1519         .unlocked_ioctl = vhost_scsi_ioctl,
1520 #ifdef CONFIG_COMPAT
1521         .compat_ioctl   = vhost_scsi_compat_ioctl,
1522 #endif
1523         .open           = vhost_scsi_open,
1524         .llseek         = noop_llseek,
1525 };
1526
1527 static struct miscdevice vhost_scsi_misc = {
1528         MISC_DYNAMIC_MINOR,
1529         "vhost-scsi",
1530         &vhost_scsi_fops,
1531 };
1532
1533 static int __init vhost_scsi_register(void)
1534 {
1535         return misc_register(&vhost_scsi_misc);
1536 }
1537
1538 static int vhost_scsi_deregister(void)
1539 {
1540         return misc_deregister(&vhost_scsi_misc);
1541 }
1542
1543 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1544 {
1545         switch (tport->tport_proto_id) {
1546         case SCSI_PROTOCOL_SAS:
1547                 return "SAS";
1548         case SCSI_PROTOCOL_FCP:
1549                 return "FCP";
1550         case SCSI_PROTOCOL_ISCSI:
1551                 return "iSCSI";
1552         default:
1553                 break;
1554         }
1555
1556         return "Unknown";
1557 }
1558
1559 static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1560         struct se_lun *lun, bool plug)
1561 {
1562
1563         struct vhost_scsi *vs = tpg->vhost_scsi;
1564         struct vhost_virtqueue *vq;
1565         u32 reason;
1566
1567         if (!vs)
1568                 return;
1569
1570         mutex_lock(&vs->dev.mutex);
1571         if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
1572                 mutex_unlock(&vs->dev.mutex);
1573                 return;
1574         }
1575
1576         if (plug)
1577                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1578         else
1579                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1580
1581         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1582         mutex_lock(&vq->mutex);
1583         tcm_vhost_send_evt(vs, tpg, lun,
1584                         VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1585         mutex_unlock(&vq->mutex);
1586         mutex_unlock(&vs->dev.mutex);
1587 }
1588
1589 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1590 {
1591         tcm_vhost_do_plug(tpg, lun, true);
1592 }
1593
1594 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1595 {
1596         tcm_vhost_do_plug(tpg, lun, false);
1597 }
1598
1599 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1600         struct se_lun *lun)
1601 {
1602         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1603                                 struct tcm_vhost_tpg, se_tpg);
1604
1605         mutex_lock(&tcm_vhost_mutex);
1606
1607         mutex_lock(&tv_tpg->tv_tpg_mutex);
1608         tv_tpg->tv_tpg_port_count++;
1609         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1610
1611         tcm_vhost_hotplug(tv_tpg, lun);
1612
1613         mutex_unlock(&tcm_vhost_mutex);
1614
1615         return 0;
1616 }
1617
1618 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1619         struct se_lun *lun)
1620 {
1621         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1622                                 struct tcm_vhost_tpg, se_tpg);
1623
1624         mutex_lock(&tcm_vhost_mutex);
1625
1626         mutex_lock(&tv_tpg->tv_tpg_mutex);
1627         tv_tpg->tv_tpg_port_count--;
1628         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1629
1630         tcm_vhost_hotunplug(tv_tpg, lun);
1631
1632         mutex_unlock(&tcm_vhost_mutex);
1633 }
1634
1635 static struct se_node_acl *tcm_vhost_make_nodeacl(
1636         struct se_portal_group *se_tpg,
1637         struct config_group *group,
1638         const char *name)
1639 {
1640         struct se_node_acl *se_nacl, *se_nacl_new;
1641         struct tcm_vhost_nacl *nacl;
1642         u64 wwpn = 0;
1643         u32 nexus_depth;
1644
1645         /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1646                 return ERR_PTR(-EINVAL); */
1647         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1648         if (!se_nacl_new)
1649                 return ERR_PTR(-ENOMEM);
1650
1651         nexus_depth = 1;
1652         /*
1653          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1654          * when converting a NodeACL from demo mode -> explict
1655          */
1656         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1657                                 name, nexus_depth);
1658         if (IS_ERR(se_nacl)) {
1659                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1660                 return se_nacl;
1661         }
1662         /*
1663          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1664          */
1665         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1666         nacl->iport_wwpn = wwpn;
1667
1668         return se_nacl;
1669 }
1670
1671 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1672 {
1673         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1674                                 struct tcm_vhost_nacl, se_node_acl);
1675         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1676         kfree(nacl);
1677 }
1678
1679 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
1680         const char *name)
1681 {
1682         struct se_portal_group *se_tpg;
1683         struct tcm_vhost_nexus *tv_nexus;
1684
1685         mutex_lock(&tv_tpg->tv_tpg_mutex);
1686         if (tv_tpg->tpg_nexus) {
1687                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1688                 pr_debug("tv_tpg->tpg_nexus already exists\n");
1689                 return -EEXIST;
1690         }
1691         se_tpg = &tv_tpg->se_tpg;
1692
1693         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1694         if (!tv_nexus) {
1695                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1696                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1697                 return -ENOMEM;
1698         }
1699         /*
1700          *  Initialize the struct se_session pointer
1701          */
1702         tv_nexus->tvn_se_sess = transport_init_session();
1703         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1704                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1705                 kfree(tv_nexus);
1706                 return -ENOMEM;
1707         }
1708         /*
1709          * Since we are running in 'demo mode' this call with generate a
1710          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1711          * the SCSI Initiator port name of the passed configfs group 'name'.
1712          */
1713         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1714                                 se_tpg, (unsigned char *)name);
1715         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1716                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1717                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1718                                 " for %s\n", name);
1719                 transport_free_session(tv_nexus->tvn_se_sess);
1720                 kfree(tv_nexus);
1721                 return -ENOMEM;
1722         }
1723         /*
1724          * Now register the TCM vhost virtual I_T Nexus as active with the
1725          * call to __transport_register_session()
1726          */
1727         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1728                         tv_nexus->tvn_se_sess, tv_nexus);
1729         tv_tpg->tpg_nexus = tv_nexus;
1730
1731         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1732         return 0;
1733 }
1734
1735 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1736 {
1737         struct se_session *se_sess;
1738         struct tcm_vhost_nexus *tv_nexus;
1739
1740         mutex_lock(&tpg->tv_tpg_mutex);
1741         tv_nexus = tpg->tpg_nexus;
1742         if (!tv_nexus) {
1743                 mutex_unlock(&tpg->tv_tpg_mutex);
1744                 return -ENODEV;
1745         }
1746
1747         se_sess = tv_nexus->tvn_se_sess;
1748         if (!se_sess) {
1749                 mutex_unlock(&tpg->tv_tpg_mutex);
1750                 return -ENODEV;
1751         }
1752
1753         if (tpg->tv_tpg_port_count != 0) {
1754                 mutex_unlock(&tpg->tv_tpg_mutex);
1755                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1756                         " active TPG port count: %d\n",
1757                         tpg->tv_tpg_port_count);
1758                 return -EBUSY;
1759         }
1760
1761         if (tpg->tv_tpg_vhost_count != 0) {
1762                 mutex_unlock(&tpg->tv_tpg_mutex);
1763                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1764                         " active TPG vhost count: %d\n",
1765                         tpg->tv_tpg_vhost_count);
1766                 return -EBUSY;
1767         }
1768
1769         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1770                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1771                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1772         /*
1773          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1774          */
1775         transport_deregister_session(tv_nexus->tvn_se_sess);
1776         tpg->tpg_nexus = NULL;
1777         mutex_unlock(&tpg->tv_tpg_mutex);
1778
1779         kfree(tv_nexus);
1780         return 0;
1781 }
1782
1783 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1784         char *page)
1785 {
1786         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1787                                 struct tcm_vhost_tpg, se_tpg);
1788         struct tcm_vhost_nexus *tv_nexus;
1789         ssize_t ret;
1790
1791         mutex_lock(&tv_tpg->tv_tpg_mutex);
1792         tv_nexus = tv_tpg->tpg_nexus;
1793         if (!tv_nexus) {
1794                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1795                 return -ENODEV;
1796         }
1797         ret = snprintf(page, PAGE_SIZE, "%s\n",
1798                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1799         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1800
1801         return ret;
1802 }
1803
1804 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1805         const char *page,
1806         size_t count)
1807 {
1808         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1809                                 struct tcm_vhost_tpg, se_tpg);
1810         struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
1811         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1812         int ret;
1813         /*
1814          * Shutdown the active I_T nexus if 'NULL' is passed..
1815          */
1816         if (!strncmp(page, "NULL", 4)) {
1817                 ret = tcm_vhost_drop_nexus(tv_tpg);
1818                 return (!ret) ? count : ret;
1819         }
1820         /*
1821          * Otherwise make sure the passed virtual Initiator port WWN matches
1822          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1823          * tcm_vhost_make_nexus().
1824          */
1825         if (strlen(page) >= TCM_VHOST_NAMELEN) {
1826                 pr_err("Emulated NAA Sas Address: %s, exceeds"
1827                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
1828                 return -EINVAL;
1829         }
1830         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1831
1832         ptr = strstr(i_port, "naa.");
1833         if (ptr) {
1834                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1835                         pr_err("Passed SAS Initiator Port %s does not"
1836                                 " match target port protoid: %s\n", i_port,
1837                                 tcm_vhost_dump_proto_id(tport_wwn));
1838                         return -EINVAL;
1839                 }
1840                 port_ptr = &i_port[0];
1841                 goto check_newline;
1842         }
1843         ptr = strstr(i_port, "fc.");
1844         if (ptr) {
1845                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1846                         pr_err("Passed FCP Initiator Port %s does not"
1847                                 " match target port protoid: %s\n", i_port,
1848                                 tcm_vhost_dump_proto_id(tport_wwn));
1849                         return -EINVAL;
1850                 }
1851                 port_ptr = &i_port[3]; /* Skip over "fc." */
1852                 goto check_newline;
1853         }
1854         ptr = strstr(i_port, "iqn.");
1855         if (ptr) {
1856                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1857                         pr_err("Passed iSCSI Initiator Port %s does not"
1858                                 " match target port protoid: %s\n", i_port,
1859                                 tcm_vhost_dump_proto_id(tport_wwn));
1860                         return -EINVAL;
1861                 }
1862                 port_ptr = &i_port[0];
1863                 goto check_newline;
1864         }
1865         pr_err("Unable to locate prefix for emulated Initiator Port:"
1866                         " %s\n", i_port);
1867         return -EINVAL;
1868         /*
1869          * Clear any trailing newline for the NAA WWN
1870          */
1871 check_newline:
1872         if (i_port[strlen(i_port)-1] == '\n')
1873                 i_port[strlen(i_port)-1] = '\0';
1874
1875         ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
1876         if (ret < 0)
1877                 return ret;
1878
1879         return count;
1880 }
1881
1882 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1883
1884 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1885         &tcm_vhost_tpg_nexus.attr,
1886         NULL,
1887 };
1888
1889 static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
1890         struct config_group *group,
1891         const char *name)
1892 {
1893         struct tcm_vhost_tport *tport = container_of(wwn,
1894                         struct tcm_vhost_tport, tport_wwn);
1895
1896         struct tcm_vhost_tpg *tpg;
1897         unsigned long tpgt;
1898         int ret;
1899
1900         if (strstr(name, "tpgt_") != name)
1901                 return ERR_PTR(-EINVAL);
1902         if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1903                 return ERR_PTR(-EINVAL);
1904
1905         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1906         if (!tpg) {
1907                 pr_err("Unable to allocate struct tcm_vhost_tpg");
1908                 return ERR_PTR(-ENOMEM);
1909         }
1910         mutex_init(&tpg->tv_tpg_mutex);
1911         INIT_LIST_HEAD(&tpg->tv_tpg_list);
1912         tpg->tport = tport;
1913         tpg->tport_tpgt = tpgt;
1914
1915         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1916                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1917         if (ret < 0) {
1918                 kfree(tpg);
1919                 return NULL;
1920         }
1921         mutex_lock(&tcm_vhost_mutex);
1922         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1923         mutex_unlock(&tcm_vhost_mutex);
1924
1925         return &tpg->se_tpg;
1926 }
1927
1928 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1929 {
1930         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1931                                 struct tcm_vhost_tpg, se_tpg);
1932
1933         mutex_lock(&tcm_vhost_mutex);
1934         list_del(&tpg->tv_tpg_list);
1935         mutex_unlock(&tcm_vhost_mutex);
1936         /*
1937          * Release the virtual I_T Nexus for this vhost TPG
1938          */
1939         tcm_vhost_drop_nexus(tpg);
1940         /*
1941          * Deregister the se_tpg from TCM..
1942          */
1943         core_tpg_deregister(se_tpg);
1944         kfree(tpg);
1945 }
1946
1947 static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1948         struct config_group *group,
1949         const char *name)
1950 {
1951         struct tcm_vhost_tport *tport;
1952         char *ptr;
1953         u64 wwpn = 0;
1954         int off = 0;
1955
1956         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1957                 return ERR_PTR(-EINVAL); */
1958
1959         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1960         if (!tport) {
1961                 pr_err("Unable to allocate struct tcm_vhost_tport");
1962                 return ERR_PTR(-ENOMEM);
1963         }
1964         tport->tport_wwpn = wwpn;
1965         /*
1966          * Determine the emulated Protocol Identifier and Target Port Name
1967          * based on the incoming configfs directory name.
1968          */
1969         ptr = strstr(name, "naa.");
1970         if (ptr) {
1971                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1972                 goto check_len;
1973         }
1974         ptr = strstr(name, "fc.");
1975         if (ptr) {
1976                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1977                 off = 3; /* Skip over "fc." */
1978                 goto check_len;
1979         }
1980         ptr = strstr(name, "iqn.");
1981         if (ptr) {
1982                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1983                 goto check_len;
1984         }
1985
1986         pr_err("Unable to locate prefix for emulated Target Port:"
1987                         " %s\n", name);
1988         kfree(tport);
1989         return ERR_PTR(-EINVAL);
1990
1991 check_len:
1992         if (strlen(name) >= TCM_VHOST_NAMELEN) {
1993                 pr_err("Emulated %s Address: %s, exceeds"
1994                         " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
1995                         TCM_VHOST_NAMELEN);
1996                 kfree(tport);
1997                 return ERR_PTR(-EINVAL);
1998         }
1999         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
2000
2001         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2002                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
2003
2004         return &tport->tport_wwn;
2005 }
2006
2007 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
2008 {
2009         struct tcm_vhost_tport *tport = container_of(wwn,
2010                                 struct tcm_vhost_tport, tport_wwn);
2011
2012         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2013                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2014                 tport->tport_name);
2015
2016         kfree(tport);
2017 }
2018
2019 static ssize_t tcm_vhost_wwn_show_attr_version(
2020         struct target_fabric_configfs *tf,
2021         char *page)
2022 {
2023         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2024                 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2025                 utsname()->machine);
2026 }
2027
2028 TF_WWN_ATTR_RO(tcm_vhost, version);
2029
2030 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2031         &tcm_vhost_wwn_version.attr,
2032         NULL,
2033 };
2034
2035 static struct target_core_fabric_ops tcm_vhost_ops = {
2036         .get_fabric_name                = tcm_vhost_get_fabric_name,
2037         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
2038         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
2039         .tpg_get_tag                    = tcm_vhost_get_tag,
2040         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2041         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2042         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2043         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2044         .tpg_check_demo_mode            = tcm_vhost_check_true,
2045         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2046         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2047         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2048         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2049         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2050         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2051         .release_cmd                    = tcm_vhost_release_cmd,
2052         .shutdown_session               = tcm_vhost_shutdown_session,
2053         .close_session                  = tcm_vhost_close_session,
2054         .sess_get_index                 = tcm_vhost_sess_get_index,
2055         .sess_get_initiator_sid         = NULL,
2056         .write_pending                  = tcm_vhost_write_pending,
2057         .write_pending_status           = tcm_vhost_write_pending_status,
2058         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2059         .get_task_tag                   = tcm_vhost_get_task_tag,
2060         .get_cmd_state                  = tcm_vhost_get_cmd_state,
2061         .queue_data_in                  = tcm_vhost_queue_data_in,
2062         .queue_status                   = tcm_vhost_queue_status,
2063         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2064         /*
2065          * Setup callers for generic logic in target_core_fabric_configfs.c
2066          */
2067         .fabric_make_wwn                = tcm_vhost_make_tport,
2068         .fabric_drop_wwn                = tcm_vhost_drop_tport,
2069         .fabric_make_tpg                = tcm_vhost_make_tpg,
2070         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2071         .fabric_post_link               = tcm_vhost_port_link,
2072         .fabric_pre_unlink              = tcm_vhost_port_unlink,
2073         .fabric_make_np                 = NULL,
2074         .fabric_drop_np                 = NULL,
2075         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2076         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2077 };
2078
2079 static int tcm_vhost_register_configfs(void)
2080 {
2081         struct target_fabric_configfs *fabric;
2082         int ret;
2083
2084         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2085                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2086                 utsname()->machine);
2087         /*
2088          * Register the top level struct config_item_type with TCM core
2089          */
2090         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2091         if (IS_ERR(fabric)) {
2092                 pr_err("target_fabric_configfs_init() failed\n");
2093                 return PTR_ERR(fabric);
2094         }
2095         /*
2096          * Setup fabric->tf_ops from our local tcm_vhost_ops
2097          */
2098         fabric->tf_ops = tcm_vhost_ops;
2099         /*
2100          * Setup default attribute lists for various fabric->tf_cit_tmpl
2101          */
2102         TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2103         TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2104         TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
2105         TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
2106         TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
2107         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2108         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2109         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2110         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2111         /*
2112          * Register the fabric for use within TCM
2113          */
2114         ret = target_fabric_configfs_register(fabric);
2115         if (ret < 0) {
2116                 pr_err("target_fabric_configfs_register() failed"
2117                                 " for TCM_VHOST\n");
2118                 return ret;
2119         }
2120         /*
2121          * Setup our local pointer to *fabric
2122          */
2123         tcm_vhost_fabric_configfs = fabric;
2124         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2125         return 0;
2126 };
2127
2128 static void tcm_vhost_deregister_configfs(void)
2129 {
2130         if (!tcm_vhost_fabric_configfs)
2131                 return;
2132
2133         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2134         tcm_vhost_fabric_configfs = NULL;
2135         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2136 };
2137
2138 static int __init tcm_vhost_init(void)
2139 {
2140         int ret = -ENOMEM;
2141         /*
2142          * Use our own dedicated workqueue for submitting I/O into
2143          * target core to avoid contention within system_wq.
2144          */
2145         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2146         if (!tcm_vhost_workqueue)
2147                 goto out;
2148
2149         ret = vhost_scsi_register();
2150         if (ret < 0)
2151                 goto out_destroy_workqueue;
2152
2153         ret = tcm_vhost_register_configfs();
2154         if (ret < 0)
2155                 goto out_vhost_scsi_deregister;
2156
2157         return 0;
2158
2159 out_vhost_scsi_deregister:
2160         vhost_scsi_deregister();
2161 out_destroy_workqueue:
2162         destroy_workqueue(tcm_vhost_workqueue);
2163 out:
2164         return ret;
2165 };
2166
2167 static void tcm_vhost_exit(void)
2168 {
2169         tcm_vhost_deregister_configfs();
2170         vhost_scsi_deregister();
2171         destroy_workqueue(tcm_vhost_workqueue);
2172 };
2173
2174 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2175 MODULE_ALIAS("tcm_vhost");
2176 MODULE_LICENSE("GPL");
2177 module_init(tcm_vhost_init);
2178 module_exit(tcm_vhost_exit);