target/sbc: Add DIF TYPE1+TYPE3 read/write verify emulation
drivers/target/target_core_sbc.c
1 /*
2  * SCSI Block Commands (SBC) parsing and emulation.
3  *
4  * (c) Copyright 2002-2013 Datera, Inc.
5  *
6  * Nicholas A. Bellinger <nab@kernel.org>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/ratelimit.h>
26 #include <linux/crc-t10dif.h>
27 #include <asm/unaligned.h>
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_tcq.h>
30
31 #include <target/target_core_base.h>
32 #include <target/target_core_backend.h>
33 #include <target/target_core_fabric.h>
34
35 #include "target_core_internal.h"
36 #include "target_core_ua.h"
37 #include "target_core_alua.h"
38
39 static sense_reason_t
40 sbc_emulate_readcapacity(struct se_cmd *cmd)
41 {
42         struct se_device *dev = cmd->se_dev;
43         unsigned char *cdb = cmd->t_task_cdb;
44         unsigned long long blocks_long = dev->transport->get_blocks(dev);
45         unsigned char *rbuf;
46         unsigned char buf[8];
47         u32 blocks;
48
49         /*
50          * SBC-2 says:
51          *   If the PMI bit is set to zero and the LOGICAL BLOCK
52          *   ADDRESS field is not set to zero, the device server shall
53          *   terminate the command with CHECK CONDITION status with
54          *   the sense key set to ILLEGAL REQUEST and the additional
55          *   sense code set to INVALID FIELD IN CDB.
56          *
57          * In SBC-3, these fields are obsolete, but some SCSI
58          * compliance tests actually check this, so we might as well
59          * follow SBC-2.
60          */
61         if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
62                 return TCM_INVALID_CDB_FIELD;
63
64         if (blocks_long >= 0x00000000ffffffff)
65                 blocks = 0xffffffff;
66         else
67                 blocks = (u32)blocks_long;
68
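        /*
         * Layout of the READ CAPACITY (10) parameter data built below (a
         * descriptive sketch): bytes 0-3 carry the RETURNED LOGICAL BLOCK
         * ADDRESS, i.e. the last addressable LBA as reported by
         * ->get_blocks(), and bytes 4-7 carry the LOGICAL BLOCK LENGTH IN
         * BYTES. Illustrative example: a 1 GiB backend with 512-byte blocks
         * returns 0x001FFFFF and 0x00000200.
         */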
69         buf[0] = (blocks >> 24) & 0xff;
70         buf[1] = (blocks >> 16) & 0xff;
71         buf[2] = (blocks >> 8) & 0xff;
72         buf[3] = blocks & 0xff;
73         buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
74         buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
75         buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
76         buf[7] = dev->dev_attrib.block_size & 0xff;
77
78         rbuf = transport_kmap_data_sg(cmd);
79         if (rbuf) {
80                 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
81                 transport_kunmap_data_sg(cmd);
82         }
83
84         target_complete_cmd(cmd, GOOD);
85         return 0;
86 }
87
88 static sense_reason_t
89 sbc_emulate_readcapacity_16(struct se_cmd *cmd)
90 {
91         struct se_device *dev = cmd->se_dev;
92         unsigned char *rbuf;
93         unsigned char buf[32];
94         unsigned long long blocks = dev->transport->get_blocks(dev);
95
96         memset(buf, 0, sizeof(buf));
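        /*
         * READ CAPACITY (16) parameter data filled in below: bytes 0-7 last
         * LBA, bytes 8-11 logical block length, byte 13 (low nibble) the
         * LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT, bytes 14-15 the lowest
         * aligned LBA, with the Thin Provisioning Enable bit ORed into byte
         * 14 further down when unmap emulation is enabled.
         */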
97         buf[0] = (blocks >> 56) & 0xff;
98         buf[1] = (blocks >> 48) & 0xff;
99         buf[2] = (blocks >> 40) & 0xff;
100         buf[3] = (blocks >> 32) & 0xff;
101         buf[4] = (blocks >> 24) & 0xff;
102         buf[5] = (blocks >> 16) & 0xff;
103         buf[6] = (blocks >> 8) & 0xff;
104         buf[7] = blocks & 0xff;
105         buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
106         buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
107         buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
108         buf[11] = dev->dev_attrib.block_size & 0xff;
109
110         if (dev->transport->get_lbppbe)
111                 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
112
113         if (dev->transport->get_alignment_offset_lbas) {
114                 u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
115                 buf[14] = (lalba >> 8) & 0x3f;
116                 buf[15] = lalba & 0xff;
117         }
118
119         /*
120          * Set the Thin Provisioning Enable bit in READ CAPACITY (16) byte 14,
121          * following sbc3r22, when emulate_tpu or emulate_tpws is enabled.
122          */
123         if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
124                 buf[14] |= 0x80;
125
126         rbuf = transport_kmap_data_sg(cmd);
127         if (rbuf) {
128                 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
129                 transport_kunmap_data_sg(cmd);
130         }
131
132         target_complete_cmd(cmd, GOOD);
133         return 0;
134 }
135
136 sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
137 {
138         u32 num_blocks;
139
140         if (cmd->t_task_cdb[0] == WRITE_SAME)
141                 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
142         else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
143                 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
144         else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
145                 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
146
147         /*
148          * Use the explicit range when a non-zero count is supplied, otherwise calculate
149          * the remaining range based on ->get_blocks() - starting LBA.
150          */
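        /*
         * Illustrative example: with ->get_blocks() returning a last LBA of
         * 1023 and a starting LBA of 1000, a zero NUMBER OF LOGICAL BLOCKS
         * field maps to 1023 - 1000 + 1 = 24 blocks.
         */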
151         if (num_blocks)
152                 return num_blocks;
153
154         return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
155                 cmd->t_task_lba + 1;
156 }
157 EXPORT_SYMBOL(sbc_get_write_same_sectors);
158
159 static sense_reason_t
160 sbc_emulate_noop(struct se_cmd *cmd)
161 {
162         target_complete_cmd(cmd, GOOD);
163         return 0;
164 }
165
166 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
167 {
168         return cmd->se_dev->dev_attrib.block_size * sectors;
169 }
170
171 static int sbc_check_valid_sectors(struct se_cmd *cmd)
172 {
173         struct se_device *dev = cmd->se_dev;
174         unsigned long long end_lba;
175         u32 sectors;
176
177         sectors = cmd->data_length / dev->dev_attrib.block_size;
178         end_lba = dev->transport->get_blocks(dev) + 1;
179
180         if (cmd->t_task_lba + sectors > end_lba) {
181                 pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
182                         cmd->t_task_lba, sectors, end_lba);
183                 return -EINVAL;
184         }
185
186         return 0;
187 }
188
189 static inline u32 transport_get_sectors_6(unsigned char *cdb)
190 {
191         /*
192          * Use 8-bit sector value.  SBC-3 says:
193          *
194          *   A TRANSFER LENGTH field set to zero specifies that 256
195          *   logical blocks shall be written.  Any other value
196          *   specifies the number of logical blocks that shall be
197          *   written.
198          */
199         return cdb[4] ? : 256;
200 }
201
202 static inline u32 transport_get_sectors_10(unsigned char *cdb)
203 {
204         return (u32)(cdb[7] << 8) + cdb[8];
205 }
206
207 static inline u32 transport_get_sectors_12(unsigned char *cdb)
208 {
209         return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
210 }
211
212 static inline u32 transport_get_sectors_16(unsigned char *cdb)
213 {
214         return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
215                     (cdb[12] << 8) + cdb[13];
216 }
217
218 /*
219  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
220  */
221 static inline u32 transport_get_sectors_32(unsigned char *cdb)
222 {
223         return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
224                     (cdb[30] << 8) + cdb[31];
225
226 }
227
228 static inline u32 transport_lba_21(unsigned char *cdb)
229 {
230         return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
231 }
232
233 static inline u32 transport_lba_32(unsigned char *cdb)
234 {
235         return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
236 }
237
238 static inline unsigned long long transport_lba_64(unsigned char *cdb)
239 {
240         unsigned int __v1, __v2;
241
242         __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
243         __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
244
245         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
246 }
247
248 /*
249  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
250  */
251 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
252 {
253         unsigned int __v1, __v2;
254
255         __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
256         __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
257
258         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
259 }
260
261 static sense_reason_t
262 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
263 {
264         unsigned int sectors = sbc_get_write_same_sectors(cmd);
265
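        /*
         * flags[0] is WRITE SAME CDB byte 1; the bits checked below are
         * 0x10 ANCHOR, 0x08 UNMAP, 0x04 PBDATA and 0x02 LBDATA (SBC-3).
         */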
266         if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
267                 pr_err("WRITE_SAME PBDATA and LBDATA"
268                         " bits not supported for Block Discard"
269                         " Emulation\n");
270                 return TCM_UNSUPPORTED_SCSI_OPCODE;
271         }
272         if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
273                 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
274                         sectors, cmd->se_dev->dev_attrib.max_write_same_len);
275                 return TCM_INVALID_CDB_FIELD;
276         }
277         /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
278         if (flags[0] & 0x10) {
279                 pr_warn("WRITE SAME with ANCHOR not supported\n");
280                 return TCM_INVALID_CDB_FIELD;
281         }
282         /*
283          * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
284          * translated into block discard requests within backend code.
285          */
286         if (flags[0] & 0x08) {
287                 if (!ops->execute_write_same_unmap)
288                         return TCM_UNSUPPORTED_SCSI_OPCODE;
289
290                 cmd->execute_cmd = ops->execute_write_same_unmap;
291                 return 0;
292         }
293         if (!ops->execute_write_same)
294                 return TCM_UNSUPPORTED_SCSI_OPCODE;
295
296         cmd->execute_cmd = ops->execute_write_same;
297         return 0;
298 }
299
300 static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
301 {
302         unsigned char *buf, *addr;
303         struct scatterlist *sg;
304         unsigned int offset;
305         sense_reason_t ret = TCM_NO_SENSE;
306         int i, count;
307         /*
308          * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
309          *
310          * 1) read the specified logical block(s);
311          * 2) transfer logical blocks from the data-out buffer;
312          * 3) XOR the logical blocks transferred from the data-out buffer with
313          *    the logical blocks read, storing the resulting XOR data in a buffer;
314          * 4) if the DISABLE WRITE bit is set to zero, then write the logical
315          *    blocks transferred from the data-out buffer; and
316          * 5) transfer the resulting XOR data to the data-in buffer.
317          */
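        /*
         * This callback implements steps 3 and 5: the data-out payload is
         * copied into a temporary buffer and XORed into the blocks already
         * read into the BIDI scatterlist, which is returned to the
         * initiator as the data-in transfer.
         */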
318         buf = kmalloc(cmd->data_length, GFP_KERNEL);
319         if (!buf) {
320                 pr_err("Unable to allocate xor_callback buf\n");
321                 return TCM_OUT_OF_RESOURCES;
322         }
323         /*
324          * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
325          * into the locally allocated *buf
326          */
327         sg_copy_to_buffer(cmd->t_data_sg,
328                           cmd->t_data_nents,
329                           buf,
330                           cmd->data_length);
331
332         /*
333          * Now perform the XOR against the BIDI read memory located at
334          * cmd->t_bidi_data_sg
335          */
336
337         offset = 0;
338         for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
339                 addr = kmap_atomic(sg_page(sg));
340                 if (!addr) {
341                         ret = TCM_OUT_OF_RESOURCES;
342                         goto out;
343                 }
344
345                 for (i = 0; i < sg->length; i++)
346                         *(addr + sg->offset + i) ^= *(buf + offset + i);
347
348                 offset += sg->length;
349                 kunmap_atomic(addr);
350         }
351
352 out:
353         kfree(buf);
354         return ret;
355 }
356
357 static sense_reason_t
358 sbc_execute_rw(struct se_cmd *cmd)
359 {
360         return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
361                                cmd->data_direction);
362 }
363
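/*
 * COMPARE_AND_WRITE flow as wired up in sbc_parse_cdb() below:
 * sbc_compare_and_write() takes ->caw_sem and submits the READ into the
 * BIDI scatterlist, compare_and_write_callback() compares that data with
 * the verify half of the data-out buffer and, on a match, resubmits the
 * write half via sbc_execute_rw(), and compare_and_write_post() releases
 * ->caw_sem once the WRITE instance has completed.
 */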
364 static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
365 {
366         struct se_device *dev = cmd->se_dev;
367
368         /*
369          * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
370          * within target_complete_ok_work() if the command was successfully
371          * sent to the backend driver.
372          */
373         spin_lock_irq(&cmd->t_state_lock);
374         if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
375                 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
376         spin_unlock_irq(&cmd->t_state_lock);
377
378         /*
379          * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
380          * before the original READ I/O submission.
381          */
382         up(&dev->caw_sem);
383
384         return TCM_NO_SENSE;
385 }
386
387 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
388 {
389         struct se_device *dev = cmd->se_dev;
390         struct scatterlist *write_sg = NULL, *sg;
391         unsigned char *buf = NULL, *addr;
392         struct sg_mapping_iter m;
393         unsigned int offset = 0, len;
394         unsigned int nlbas = cmd->t_task_nolb;
395         unsigned int block_size = dev->dev_attrib.block_size;
396         unsigned int compare_len = (nlbas * block_size);
397         sense_reason_t ret = TCM_NO_SENSE;
398         int rc, i;
399
400         /*
401          * Handle early failure in transport_generic_request_failure(),
402          * which will not have taken ->caw_sem yet.
403          */
404         if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
405                 return TCM_NO_SENSE;
406         /*
407          * Immediately exit + release dev->caw_sem if command has already
408          * been failed with a non-zero SCSI status.
409          */
410         if (cmd->scsi_status) {
411                 pr_err("compare_and_write_callback: non zero scsi_status:"
412                         " 0x%02x\n", cmd->scsi_status);
413                 goto out;
414         }
415
416         buf = kzalloc(cmd->data_length, GFP_KERNEL);
417         if (!buf) {
418                 pr_err("Unable to allocate compare_and_write buf\n");
419                 ret = TCM_OUT_OF_RESOURCES;
420                 goto out;
421         }
422
423         write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
424                            GFP_KERNEL);
425         if (!write_sg) {
426                 pr_err("Unable to allocate compare_and_write sg\n");
427                 ret = TCM_OUT_OF_RESOURCES;
428                 goto out;
429         }
430         /*
431          * Setup verify and write data payloads from total NumberLBAs.
432          */
433         rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
434                                cmd->data_length);
435         if (!rc) {
436                 pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
437                 ret = TCM_OUT_OF_RESOURCES;
438                 goto out;
439         }
440         /*
441          * Compare the SCSI READ payload against the verify payload
442          */
443         for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
444                 addr = (unsigned char *)kmap_atomic(sg_page(sg));
445                 if (!addr) {
446                         ret = TCM_OUT_OF_RESOURCES;
447                         goto out;
448                 }
449
450                 len = min(sg->length, compare_len);
451
452                 if (memcmp(addr, buf + offset, len)) {
453                         pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
454                                 addr, buf + offset);
455                         kunmap_atomic(addr);
456                         goto miscompare;
457                 }
458                 kunmap_atomic(addr);
459
460                 offset += len;
461                 compare_len -= len;
462                 if (!compare_len)
463                         break;
464         }
465
466         i = 0;
467         len = cmd->t_task_nolb * block_size;
468         sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
469         /*
470          * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
471          */
472         while (len) {
473                 sg_miter_next(&m);
474
475                 if (block_size < PAGE_SIZE) {
476                         sg_set_page(&write_sg[i], m.page, block_size,
477                                     block_size);
478                 } else {
479                         sg_miter_next(&m);
480                         sg_set_page(&write_sg[i], m.page, block_size,
481                                     0);
482                 }
483                 len -= block_size;
484                 i++;
485         }
486         sg_miter_stop(&m);
487         /*
488          * Save the original SGL + nents values before updating to new
489          * assignments, to be released in transport_free_pages() ->
490          * transport_reset_sgl_orig()
491          */
492         cmd->t_data_sg_orig = cmd->t_data_sg;
493         cmd->t_data_sg = write_sg;
494         cmd->t_data_nents_orig = cmd->t_data_nents;
495         cmd->t_data_nents = 1;
496
497         cmd->sam_task_attr = MSG_HEAD_TAG;
498         cmd->transport_complete_callback = compare_and_write_post;
499         /*
500          * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
501          * for submitting the adjusted SGL to write instance user-data.
502          */
503         cmd->execute_cmd = sbc_execute_rw;
504
505         spin_lock_irq(&cmd->t_state_lock);
506         cmd->t_state = TRANSPORT_PROCESSING;
507         cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
508         spin_unlock_irq(&cmd->t_state_lock);
509
510         __target_execute_cmd(cmd);
511
512         kfree(buf);
513         return ret;
514
515 miscompare:
516         pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
517                 dev->transport->name);
518         ret = TCM_MISCOMPARE_VERIFY;
519 out:
520         /*
521          * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
522          * sbc_compare_and_write() before the original READ I/O submission.
523          */
524         up(&dev->caw_sem);
525         kfree(write_sg);
526         kfree(buf);
527         return ret;
528 }
529
530 static sense_reason_t
531 sbc_compare_and_write(struct se_cmd *cmd)
532 {
533         struct se_device *dev = cmd->se_dev;
534         sense_reason_t ret;
535         int rc;
536         /*
537          * Submit the READ first for COMPARE_AND_WRITE to perform the
538          * comparision using SGLs at cmd->t_bidi_data_sg..
539          */
540         rc = down_interruptible(&dev->caw_sem);
541         if ((rc != 0) || signal_pending(current)) {
542                 cmd->transport_complete_callback = NULL;
543                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
544         }
545         /*
546          * Reset cmd->data_length to the length of a single I/O (NoLB *
547          * block_size) so that backend drivers which depend on this value
548          * matching the size of the submitted I/O are not confused.
549          */
550         cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
551
552         ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
553                               DMA_FROM_DEVICE);
554         if (ret) {
555                 cmd->transport_complete_callback = NULL;
556                 up(&dev->caw_sem);
557                 return ret;
558         }
559         /*
560          * Unlock of dev->caw_sem to occur in compare_and_write_callback()
561          * upon MISCOMPARE, or in compare_and_write_done() upon completion
562          * of WRITE instance user-data.
563          */
564         return TCM_NO_SENSE;
565 }
566
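/*
 * Validate the RDPROTECT/WRPROTECT field (CDB byte 1, bits 5-7) against the
 * backend pi_prot_type and, when protection information is attached to the
 * command, seed cmd->prot_* for the DIF verify emulation below. Only TYPE2
 * with a non-zero protect field is rejected here.
 */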
567 static bool
568 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
569                u32 sectors)
570 {
571         if (!cmd->t_prot_sg || !cmd->t_prot_nents)
572                 return true;
573
574         switch (dev->dev_attrib.pi_prot_type) {
575         case TARGET_DIF_TYPE3_PROT:
576                 if (!(cdb[1] & 0xe0))
577                         return true;
578
579                 cmd->reftag_seed = 0xffffffff;
580                 break;
581         case TARGET_DIF_TYPE2_PROT:
582                 if (cdb[1] & 0xe0)
583                         return false;
584
585                 cmd->reftag_seed = cmd->t_task_lba;
586                 break;
587         case TARGET_DIF_TYPE1_PROT:
588                 if (!(cdb[1] & 0xe0))
589                         return true;
590
591                 cmd->reftag_seed = cmd->t_task_lba;
592                 break;
593         case TARGET_DIF_TYPE0_PROT:
594         default:
595                 return true;
596         }
597
598         cmd->prot_type = dev->dev_attrib.pi_prot_type;
599         cmd->prot_length = dev->prot_length * sectors;
600         cmd->prot_handover = PROT_SEPERATED;
601
602         return true;
603 }
604
605 sense_reason_t
606 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
607 {
608         struct se_device *dev = cmd->se_dev;
609         unsigned char *cdb = cmd->t_task_cdb;
610         unsigned int size;
611         u32 sectors = 0;
612         sense_reason_t ret;
613
614         switch (cdb[0]) {
615         case READ_6:
616                 sectors = transport_get_sectors_6(cdb);
617                 cmd->t_task_lba = transport_lba_21(cdb);
618                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
619                 cmd->execute_rw = ops->execute_rw;
620                 cmd->execute_cmd = sbc_execute_rw;
621                 break;
622         case READ_10:
623                 sectors = transport_get_sectors_10(cdb);
624                 cmd->t_task_lba = transport_lba_32(cdb);
625
626                 if (!sbc_check_prot(dev, cmd, cdb, sectors))
627                         return TCM_UNSUPPORTED_SCSI_OPCODE;
628
629                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
630                 cmd->execute_rw = ops->execute_rw;
631                 cmd->execute_cmd = sbc_execute_rw;
632                 break;
633         case READ_12:
634                 sectors = transport_get_sectors_12(cdb);
635                 cmd->t_task_lba = transport_lba_32(cdb);
636
637                 if (!sbc_check_prot(dev, cmd, cdb, sectors))
638                         return TCM_UNSUPPORTED_SCSI_OPCODE;
639
640                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
641                 cmd->execute_rw = ops->execute_rw;
642                 cmd->execute_cmd = sbc_execute_rw;
643                 break;
644         case READ_16:
645                 sectors = transport_get_sectors_16(cdb);
646                 cmd->t_task_lba = transport_lba_64(cdb);
647
648                 if (!sbc_check_prot(dev, cmd, cdb, sectors))
649                         return TCM_UNSUPPORTED_SCSI_OPCODE;
650
651                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
652                 cmd->execute_rw = ops->execute_rw;
653                 cmd->execute_cmd = sbc_execute_rw;
654                 break;
655         case WRITE_6:
656                 sectors = transport_get_sectors_6(cdb);
657                 cmd->t_task_lba = transport_lba_21(cdb);
658                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
659                 cmd->execute_rw = ops->execute_rw;
660                 cmd->execute_cmd = sbc_execute_rw;
661                 break;
662         case WRITE_10:
663         case WRITE_VERIFY:
664                 sectors = transport_get_sectors_10(cdb);
665                 cmd->t_task_lba = transport_lba_32(cdb);
666
667                 if (!sbc_check_prot(dev, cmd, cdb, sectors))
668                         return TCM_UNSUPPORTED_SCSI_OPCODE;
669
670                 if (cdb[1] & 0x8)
671                         cmd->se_cmd_flags |= SCF_FUA;
672                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
673                 cmd->execute_rw = ops->execute_rw;
674                 cmd->execute_cmd = sbc_execute_rw;
675                 break;
676         case WRITE_12:
677                 sectors = transport_get_sectors_12(cdb);
678                 cmd->t_task_lba = transport_lba_32(cdb);
679
680                 if (!sbc_check_prot(dev, cmd, cdb, sectors))
681                         return TCM_UNSUPPORTED_SCSI_OPCODE;
682
683                 if (cdb[1] & 0x8)
684                         cmd->se_cmd_flags |= SCF_FUA;
685                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
686                 cmd->execute_rw = ops->execute_rw;
687                 cmd->execute_cmd = sbc_execute_rw;
688                 break;
689         case WRITE_16:
690                 sectors = transport_get_sectors_16(cdb);
691                 cmd->t_task_lba = transport_lba_64(cdb);
692
693                 if (!sbc_check_prot(dev, cmd, cdb, sectors))
694                         return TCM_UNSUPPORTED_SCSI_OPCODE;
695
696                 if (cdb[1] & 0x8)
697                         cmd->se_cmd_flags |= SCF_FUA;
698                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
699                 cmd->execute_rw = ops->execute_rw;
700                 cmd->execute_cmd = sbc_execute_rw;
701                 break;
702         case XDWRITEREAD_10:
703                 if (cmd->data_direction != DMA_TO_DEVICE ||
704                     !(cmd->se_cmd_flags & SCF_BIDI))
705                         return TCM_INVALID_CDB_FIELD;
706                 sectors = transport_get_sectors_10(cdb);
707
708                 cmd->t_task_lba = transport_lba_32(cdb);
709                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
710
711                 /*
712                  * Setup BIDI XOR callback to be run after I/O completion.
713                  */
714                 cmd->execute_rw = ops->execute_rw;
715                 cmd->execute_cmd = sbc_execute_rw;
716                 cmd->transport_complete_callback = &xdreadwrite_callback;
717                 if (cdb[1] & 0x8)
718                         cmd->se_cmd_flags |= SCF_FUA;
719                 break;
720         case VARIABLE_LENGTH_CMD:
721         {
722                 u16 service_action = get_unaligned_be16(&cdb[8]);
723                 switch (service_action) {
724                 case XDWRITEREAD_32:
725                         sectors = transport_get_sectors_32(cdb);
726
727                         /*
728                          * Use WRITE_32 and READ_32 opcodes for the emulated
729                          * XDWRITE_READ_32 logic.
730                          */
731                         cmd->t_task_lba = transport_lba_64_ext(cdb);
732                         cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
733
734                         /*
735                          * Setup BIDI XOR callback to be run after I/O
736                          * completion.
737                          */
738                         cmd->execute_rw = ops->execute_rw;
739                         cmd->execute_cmd = sbc_execute_rw;
740                         cmd->transport_complete_callback = &xdreadwrite_callback;
741                         if (cdb[1] & 0x8)
742                                 cmd->se_cmd_flags |= SCF_FUA;
743                         break;
744                 case WRITE_SAME_32:
745                         sectors = transport_get_sectors_32(cdb);
746                         if (!sectors) {
747                                 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
748                                        " supported\n");
749                                 return TCM_INVALID_CDB_FIELD;
750                         }
751
752                         size = sbc_get_size(cmd, 1);
753                         cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
754
755                         ret = sbc_setup_write_same(cmd, &cdb[10], ops);
756                         if (ret)
757                                 return ret;
758                         break;
759                 default:
760                         pr_err("VARIABLE_LENGTH_CMD service action"
761                                 " 0x%04x not supported\n", service_action);
762                         return TCM_UNSUPPORTED_SCSI_OPCODE;
763                 }
764                 break;
765         }
766         case COMPARE_AND_WRITE:
767                 sectors = cdb[13];
768                 /*
769                  * Currently enforce COMPARE_AND_WRITE for a single sector
770                  */
771                 if (sectors > 1) {
772                         pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
773                                " than 1\n", sectors);
774                         return TCM_INVALID_CDB_FIELD;
775                 }
776                 /*
777                  * Double size because we have two buffers, note that
778                  * zero is not an error..
779                  */
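                /*
                 * Per SBC-3 the data-out buffer carries the verify payload
                 * followed by the write payload, hence 2 * NoLB blocks.
                 */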
780                 size = 2 * sbc_get_size(cmd, sectors);
781                 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
782                 cmd->t_task_nolb = sectors;
783                 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
784                 cmd->execute_rw = ops->execute_rw;
785                 cmd->execute_cmd = sbc_compare_and_write;
786                 cmd->transport_complete_callback = compare_and_write_callback;
787                 break;
788         case READ_CAPACITY:
789                 size = READ_CAP_LEN;
790                 cmd->execute_cmd = sbc_emulate_readcapacity;
791                 break;
792         case SERVICE_ACTION_IN:
793                 switch (cmd->t_task_cdb[1] & 0x1f) {
794                 case SAI_READ_CAPACITY_16:
795                         cmd->execute_cmd = sbc_emulate_readcapacity_16;
796                         break;
797                 case SAI_REPORT_REFERRALS:
798                         cmd->execute_cmd = target_emulate_report_referrals;
799                         break;
800                 default:
801                         pr_err("Unsupported SA: 0x%02x\n",
802                                 cmd->t_task_cdb[1] & 0x1f);
803                         return TCM_INVALID_CDB_FIELD;
804                 }
805                 size = (cdb[10] << 24) | (cdb[11] << 16) |
806                        (cdb[12] << 8) | cdb[13];
807                 break;
808         case SYNCHRONIZE_CACHE:
809         case SYNCHRONIZE_CACHE_16:
810                 if (!ops->execute_sync_cache) {
811                         size = 0;
812                         cmd->execute_cmd = sbc_emulate_noop;
813                         break;
814                 }
815
816                 /*
817                  * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
818                  */
819                 if (cdb[0] == SYNCHRONIZE_CACHE) {
820                         sectors = transport_get_sectors_10(cdb);
821                         cmd->t_task_lba = transport_lba_32(cdb);
822                 } else {
823                         sectors = transport_get_sectors_16(cdb);
824                         cmd->t_task_lba = transport_lba_64(cdb);
825                 }
826
827                 size = sbc_get_size(cmd, sectors);
828
829                 /*
830                  * Check to ensure that LBA + Range does not exceed past end of
831                  * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
832                  */
833                 if (cmd->t_task_lba || sectors) {
834                         if (sbc_check_valid_sectors(cmd) < 0)
835                                 return TCM_ADDRESS_OUT_OF_RANGE;
836                 }
837                 cmd->execute_cmd = ops->execute_sync_cache;
838                 break;
839         case UNMAP:
840                 if (!ops->execute_unmap)
841                         return TCM_UNSUPPORTED_SCSI_OPCODE;
842
843                 size = get_unaligned_be16(&cdb[7]);
844                 cmd->execute_cmd = ops->execute_unmap;
845                 break;
846         case WRITE_SAME_16:
847                 sectors = transport_get_sectors_16(cdb);
848                 if (!sectors) {
849                         pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
850                         return TCM_INVALID_CDB_FIELD;
851                 }
852
853                 size = sbc_get_size(cmd, 1);
854                 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
855
856                 ret = sbc_setup_write_same(cmd, &cdb[1], ops);
857                 if (ret)
858                         return ret;
859                 break;
860         case WRITE_SAME:
861                 sectors = transport_get_sectors_10(cdb);
862                 if (!sectors) {
863                         pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
864                         return TCM_INVALID_CDB_FIELD;
865                 }
866
867                 size = sbc_get_size(cmd, 1);
868                 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
869
870                 /*
871                  * Follow sbc3r26 with WRITE_SAME (10) and check for the existence
872                  * of byte 1 bit 3 UNMAP instead of original reserved field
873                  */
874                 ret = sbc_setup_write_same(cmd, &cdb[1], ops);
875                 if (ret)
876                         return ret;
877                 break;
878         case VERIFY:
879                 size = 0;
880                 cmd->execute_cmd = sbc_emulate_noop;
881                 break;
882         case REZERO_UNIT:
883         case SEEK_6:
884         case SEEK_10:
885                 /*
886                  * There are still clients out there which use these old SCSI-2
887                  * commands. This mainly happens when running VMs with legacy
888                  * guest systems, connected via SCSI command pass-through to
889                  * iSCSI targets. Make them happy and return status GOOD.
890                  */
891                 size = 0;
892                 cmd->execute_cmd = sbc_emulate_noop;
893                 break;
894         default:
895                 ret = spc_parse_cdb(cmd, &size);
896                 if (ret)
897                         return ret;
898         }
899
900         /* reject any command that we don't have a handler for */
901         if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
902                 return TCM_UNSUPPORTED_SCSI_OPCODE;
903
904         if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
905                 unsigned long long end_lba;
906
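                /*
                 * Reject transfers that exceed the fabric or backend limits,
                 * or that would run past the last LBA, before deriving the
                 * expected data length from the sector count.
                 */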
907                 if (sectors > dev->dev_attrib.fabric_max_sectors) {
908                         printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
909                                 " big sectors %u exceeds fabric_max_sectors:"
910                                 " %u\n", cdb[0], sectors,
911                                 dev->dev_attrib.fabric_max_sectors);
912                         return TCM_INVALID_CDB_FIELD;
913                 }
914                 if (sectors > dev->dev_attrib.hw_max_sectors) {
915                         printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
916                                 " big sectors %u exceeds backend hw_max_sectors:"
917                                 " %u\n", cdb[0], sectors,
918                                 dev->dev_attrib.hw_max_sectors);
919                         return TCM_INVALID_CDB_FIELD;
920                 }
921
922                 end_lba = dev->transport->get_blocks(dev) + 1;
923                 if (cmd->t_task_lba + sectors > end_lba) {
924                         pr_err("cmd exceeds last lba %llu "
925                                 "(lba %llu, sectors %u)\n",
926                                 end_lba, cmd->t_task_lba, sectors);
927                         return TCM_ADDRESS_OUT_OF_RANGE;
928                 }
929
930                 if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
931                         size = sbc_get_size(cmd, sectors);
932         }
933
934         return target_cmd_size_check(cmd, size);
935 }
936 EXPORT_SYMBOL(sbc_parse_cdb);
937
938 u32 sbc_get_device_type(struct se_device *dev)
939 {
940         return TYPE_DISK;
941 }
942 EXPORT_SYMBOL(sbc_get_device_type);
943
944 sense_reason_t
945 sbc_execute_unmap(struct se_cmd *cmd,
946         sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
947                                       sector_t, sector_t),
948         void *priv)
949 {
950         struct se_device *dev = cmd->se_dev;
951         unsigned char *buf, *ptr = NULL;
952         sector_t lba;
953         int size;
954         u32 range;
955         sense_reason_t ret = 0;
956         int dl, bd_dl;
957
958         /* We never set ANC_SUP */
959         if (cmd->t_task_cdb[1])
960                 return TCM_INVALID_CDB_FIELD;
961
962         if (cmd->data_length == 0) {
963                 target_complete_cmd(cmd, SAM_STAT_GOOD);
964                 return 0;
965         }
966
967         if (cmd->data_length < 8) {
968                 pr_warn("UNMAP parameter list length %u too small\n",
969                         cmd->data_length);
970                 return TCM_PARAMETER_LIST_LENGTH_ERROR;
971         }
972
973         buf = transport_kmap_data_sg(cmd);
974         if (!buf)
975                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
976
977         dl = get_unaligned_be16(&buf[0]);
978         bd_dl = get_unaligned_be16(&buf[2]);
979
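        /*
         * UNMAP parameter list header: bytes 0-1 UNMAP DATA LENGTH (dl),
         * bytes 2-3 UNMAP BLOCK DESCRIPTOR DATA LENGTH (bd_dl), bytes 4-7
         * reserved. Each 16-byte block descriptor that follows carries an
         * 8-byte LBA, a 4-byte NUMBER OF LOGICAL BLOCKS and 4 reserved
         * bytes.
         */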
980         size = cmd->data_length - 8;
981         if (bd_dl > size)
982                 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
983                         cmd->data_length, bd_dl);
984         else
985                 size = bd_dl;
986
987         if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
988                 ret = TCM_INVALID_PARAMETER_LIST;
989                 goto err;
990         }
991
992         /* First UNMAP block descriptor starts at 8 byte offset */
993         ptr = &buf[8];
994         pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
995                 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
996
997         while (size >= 16) {
998                 lba = get_unaligned_be64(&ptr[0]);
999                 range = get_unaligned_be32(&ptr[8]);
1000                 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
1001                                  (unsigned long long)lba, range);
1002
1003                 if (range > dev->dev_attrib.max_unmap_lba_count) {
1004                         ret = TCM_INVALID_PARAMETER_LIST;
1005                         goto err;
1006                 }
1007
1008                 if (lba + range > dev->transport->get_blocks(dev) + 1) {
1009                         ret = TCM_ADDRESS_OUT_OF_RANGE;
1010                         goto err;
1011                 }
1012
1013                 ret = do_unmap_fn(cmd, priv, lba, range);
1014                 if (ret)
1015                         goto err;
1016
1017                 ptr += 16;
1018                 size -= 16;
1019         }
1020
1021 err:
1022         transport_kunmap_data_sg(cmd);
1023         if (!ret)
1024                 target_complete_cmd(cmd, GOOD);
1025         return ret;
1026 }
1027 EXPORT_SYMBOL(sbc_execute_unmap);
1028
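/*
 * Verify one T10-DIF tuple (8 bytes of protection information per logical
 * block): a 16-bit guard tag holding the CRC16-T10DIF of the data block, a
 * 16-bit application tag, and a 32-bit reference tag checked against the
 * low 32 bits of the LBA for TYPE1 or against the expected initial LBA
 * (ei_lba) for TYPE2.
 */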
1029 static sense_reason_t
1030 sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
1031                   const void *p, sector_t sector, unsigned int ei_lba)
1032 {
1033         int block_size = dev->dev_attrib.block_size;
1034         __be16 csum;
1035
1036         csum = cpu_to_be16(crc_t10dif(p, block_size));
1037
1038         if (sdt->guard_tag != csum) {
1039                 pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
1040                         " csum 0x%04x\n", (unsigned long long)sector,
1041                         be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
1042                 return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1043         }
1044
1045         if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
1046             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1047                 pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
1048                        " sector LSB: 0x%08x\n", (unsigned long long)sector,
1049                        be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
1050                 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1051         }
1052
1053         if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
1054             be32_to_cpu(sdt->ref_tag) != ei_lba) {
1055                 pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
1056                        " ei_lba: 0x%08x\n", (unsigned long long)sector,
1057                         be32_to_cpu(sdt->ref_tag), ei_lba);
1058                 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1059         }
1060
1061         return 0;
1062 }
1063
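/*
 * Copy protection information between cmd->t_prot_sg and the scatterlist
 * passed in by the backend: for reads (read == true) the PI fetched from
 * the backend is copied into cmd->t_prot_sg for transfer to the initiator,
 * for writes the initiator-provided PI is copied out to the backend
 * scatterlist.
 */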
1064 static void
1065 sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
1066                   struct scatterlist *sg, int sg_off)
1067 {
1068         struct se_device *dev = cmd->se_dev;
1069         struct scatterlist *psg;
1070         void *paddr, *addr;
1071         unsigned int i, len, left;
1072
1073         left = sectors * dev->prot_length;
1074
1075         for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
1076
1077                 len = min(psg->length, left);
1078                 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1079                 addr = kmap_atomic(sg_page(sg)) + sg_off;
1080
1081                 if (read)
1082                         memcpy(paddr, addr, len);
1083                 else
1084                         memcpy(addr, paddr, len);
1085
1086                 left -= len;
1087                 kunmap_atomic(paddr);
1088                 kunmap_atomic(addr);
1089         }
1090 }
1091
1092 sense_reason_t
1093 sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1094                      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
1095 {
1096         struct se_device *dev = cmd->se_dev;
1097         struct se_dif_v1_tuple *sdt;
1098         struct scatterlist *dsg, *psg = cmd->t_prot_sg;
1099         sector_t sector = start;
1100         void *daddr, *paddr;
1101         int i, j, offset = 0;
1102         sense_reason_t rc;
1103
1104         for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
1105                 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1106                 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1107
1108                 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
1109
1110                         if (offset >= psg->length) {
1111                                 kunmap_atomic(paddr);
1112                                 psg = sg_next(psg);
1113                                 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1114                                 offset = 0;
1115                         }
1116
1117                         sdt = paddr + offset;
1118
1119                         pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
1120                                  " app_tag: 0x%04x ref_tag: %u\n",
1121                                  (unsigned long long)sector, sdt->guard_tag,
1122                                  sdt->app_tag, be32_to_cpu(sdt->ref_tag));
1123
1124                         rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
1125                                                ei_lba);
1126                         if (rc) {
1127                                 kunmap_atomic(paddr);
1128                                 kunmap_atomic(daddr);
1129                                 return rc;
1130                         }
1131
1132                         sector++;
1133                         ei_lba++;
1134                         offset += sizeof(struct se_dif_v1_tuple);
1135                 }
1136
1137                 kunmap_atomic(paddr);
1138                 kunmap_atomic(daddr);
1139         }
1140         sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
1141
1142         return 0;
1143 }
1144 EXPORT_SYMBOL(sbc_dif_verify_write);
1145
1146 sense_reason_t
1147 sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1148                     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
1149 {
1150         struct se_device *dev = cmd->se_dev;
1151         struct se_dif_v1_tuple *sdt;
1152         struct scatterlist *dsg;
1153         sector_t sector = start;
1154         void *daddr, *paddr;
1155         int i, j, offset = sg_off;
1156         sense_reason_t rc;
1157
1158         for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
1159                 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1160                 paddr = kmap_atomic(sg_page(sg)) + sg->offset;
1161
1162                 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
1163
1164                         if (offset >= sg->length) {
1165                                 kunmap_atomic(paddr);
1166                                 sg = sg_next(sg);
1167                                 paddr = kmap_atomic(sg_page(sg)) + sg->offset;
1168                                 offset = 0;
1169                         }
1170
1171                         sdt = paddr + offset;
1172
1173                         pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
1174                                  " app_tag: 0x%04x ref_tag: %u\n",
1175                                  (unsigned long long)sector, sdt->guard_tag,
1176                                  sdt->app_tag, be32_to_cpu(sdt->ref_tag));
1177
1178                         if (sdt->app_tag == cpu_to_be16(0xffff)) {
1179                                 sector++;
1180                                 offset += sizeof(struct se_dif_v1_tuple);
1181                                 continue;
1182                         }
1183
1184                         rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
1185                                                ei_lba);
1186                         if (rc) {
1187                                 kunmap_atomic(paddr);
1188                                 kunmap_atomic(daddr);
1189                                 return rc;
1190                         }
1191
1192                         sector++;
1193                         ei_lba++;
1194                         offset += sizeof(struct se_dif_v1_tuple);
1195                 }
1196
1197                 kunmap_atomic(paddr);
1198                 kunmap_atomic(daddr);
1199         }
1200         sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
1201
1202         return 0;
1203 }
1204 EXPORT_SYMBOL(sbc_dif_verify_read);