UPSTREAM: PCI: rockchip: Add Kconfig COMPILE_TEST
[firefly-linux-kernel-4.4.55.git] / drivers / target / target_core_file.c
index b11890d85120977d6e380db19192143ddab6251c..79291869bce6c72d9eb4a72fa6d6f5614a104aa4 100644 (file)
@@ -3,7 +3,7 @@
  *
  * This file contains the Storage Engine <-> FILEIO transport specific functions
  *
- * (c) Copyright 2005-2012 RisingTide Systems LLC.
+ * (c) Copyright 2005-2013 Datera, Inc.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -30,9 +30,9 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 #include <linux/falloc.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
+#include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
 #include <target/target_core_base.h>
@@ -45,10 +45,6 @@ static inline struct fd_dev *FD_DEV(struct se_device *dev)
        return container_of(dev, struct fd_dev, dev);
 }
 
-/*     fd_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
 static int fd_attach_hba(struct se_hba *hba, u32 host_id)
 {
        struct fd_host *fd_host;
@@ -65,10 +61,9 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
 
        pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
                " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
-               TARGET_CORE_MOD_VERSION);
-       pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
-               " MaxSectors: %u\n",
-               hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+               TARGET_CORE_VERSION);
+       pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+               hba->hba_id, fd_host->fd_host_id);
 
        return 0;
 }
@@ -165,25 +160,10 @@ static int fd_configure_device(struct se_device *dev)
                        " block_device blocks: %llu logical_block_size: %d\n",
                        dev_size, div_u64(dev_size, fd_dev->fd_block_size),
                        fd_dev->fd_block_size);
-               /*
-                * Check if the underlying struct block_device request_queue supports
-                * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
-                * in ATA and we need to set TPE=1
-                */
-               if (blk_queue_discard(q)) {
-                       dev->dev_attrib.max_unmap_lba_count =
-                               q->limits.max_discard_sectors;
-                       /*
-                        * Currently hardcoded to 1 in Linux/SCSI code..
-                        */
-                       dev->dev_attrib.max_unmap_block_desc_count = 1;
-                       dev->dev_attrib.unmap_granularity =
-                               q->limits.discard_granularity >> 9;
-                       dev->dev_attrib.unmap_granularity_alignment =
-                               q->limits.discard_alignment;
+
+               if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
                        pr_debug("IFILE: BLOCK Discard support available,"
-                                       " disabled by default\n");
-               }
+                                " disabled by default\n");
                /*
                 * Enable write same emulation for IBLOCK and use 0xFFFF as
                 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -220,7 +200,8 @@ static int fd_configure_device(struct se_device *dev)
        }
 
        dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
-       dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+       dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+       dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
        dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
@@ -245,6 +226,14 @@ fail:
        return ret;
 }
 
+static void fd_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct fd_dev *fd_dev = FD_DEV(dev);
+
+       kfree(fd_dev);
+}
+
 static void fd_free_device(struct se_device *dev)
 {
        struct fd_dev *fd_dev = FD_DEV(dev);
@@ -253,50 +242,44 @@ static void fd_free_device(struct se_device *dev)
                filp_close(fd_dev->fd_file, NULL);
                fd_dev->fd_file = NULL;
        }
-
-       kfree(fd_dev);
+       call_rcu(&dev->rcu_head, fd_dev_call_rcu);
 }
 
-static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
-               u32 sgl_nents, int is_write)
+static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
+                   u32 block_size, struct scatterlist *sgl,
+                   u32 sgl_nents, u32 data_length, int is_write)
 {
-       struct se_device *se_dev = cmd->se_dev;
-       struct fd_dev *dev = FD_DEV(se_dev);
-       struct file *fd = dev->fd_file;
        struct scatterlist *sg;
-       struct iovec *iov;
-       mm_segment_t old_fs;
-       loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
+       struct iov_iter iter;
+       struct bio_vec *bvec;
+       ssize_t len = 0;
+       loff_t pos = (cmd->t_task_lba * block_size);
        int ret = 0, i;
 
-       iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
-       if (!iov) {
+       bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
+       if (!bvec) {
                pr_err("Unable to allocate fd_do_readv iov[]\n");
                return -ENOMEM;
        }
 
        for_each_sg(sgl, sg, sgl_nents, i) {
-               iov[i].iov_len = sg->length;
-               iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
-       }
+               bvec[i].bv_page = sg_page(sg);
+               bvec[i].bv_len = sg->length;
+               bvec[i].bv_offset = sg->offset;
 
-       old_fs = get_fs();
-       set_fs(get_ds());
+               len += sg->length;
+       }
 
+       iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
        if (is_write)
-               ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+               ret = vfs_iter_write(fd, &iter, &pos);
        else
-               ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
-
-       set_fs(old_fs);
+               ret = vfs_iter_read(fd, &iter, &pos);
 
-       for_each_sg(sgl, sg, sgl_nents, i)
-               kunmap(sg_page(sg));
-
-       kfree(iov);
+       kfree(bvec);
 
        if (is_write) {
-               if (ret < 0 || ret != cmd->data_length) {
+               if (ret < 0 || ret != data_length) {
                        pr_err("%s() write returned %d\n", __func__, ret);
                        return (ret < 0 ? ret : -EINVAL);
                }
@@ -307,10 +290,10 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
                 * block_device.
                 */
                if (S_ISBLK(file_inode(fd)->i_mode)) {
-                       if (ret < 0 || ret != cmd->data_length) {
+                       if (ret < 0 || ret != data_length) {
                                pr_err("%s() returned %d, expecting %u for "
                                                "S_ISBLK\n", __func__, ret,
-                                               cmd->data_length);
+                                               data_length);
                                return (ret < 0 ? ret : -EINVAL);
                        }
                } else {
@@ -349,7 +332,7 @@ fd_execute_sync_cache(struct se_cmd *cmd)
        } else {
                start = cmd->t_task_lba * dev->dev_attrib.block_size;
                if (cmd->data_length)
-                       end = start + cmd->data_length;
+                       end = start + cmd->data_length - 1;
                else
                        end = LLONG_MAX;
        }
@@ -369,127 +352,135 @@ fd_execute_sync_cache(struct se_cmd *cmd)
        return 0;
 }
 
-static unsigned char *
-fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
-                   unsigned int len)
-{
-       struct se_device *se_dev = cmd->se_dev;
-       unsigned int block_size = se_dev->dev_attrib.block_size;
-       unsigned int i = 0, end;
-       unsigned char *buf, *p, *kmap_buf;
-
-       buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
-       if (!buf) {
-               pr_err("Unable to allocate fd_execute_write_same buf\n");
-               return NULL;
-       }
-
-       kmap_buf = kmap(sg_page(sg)) + sg->offset;
-       if (!kmap_buf) {
-               pr_err("kmap() failed in fd_setup_write_same\n");
-               kfree(buf);
-               return NULL;
-       }
-       /*
-        * Fill local *buf to contain multiple WRITE_SAME blocks up to
-        * min(len, PAGE_SIZE)
-        */
-       p = buf;
-       end = min_t(unsigned int, len, PAGE_SIZE);
-
-       while (i < end) {
-               memcpy(p, kmap_buf, block_size);
-
-               i += block_size;
-               p += block_size;
-       }
-       kunmap(sg_page(sg));
-
-       return buf;
-}
-
 static sense_reason_t
 fd_execute_write_same(struct se_cmd *cmd)
 {
        struct se_device *se_dev = cmd->se_dev;
        struct fd_dev *fd_dev = FD_DEV(se_dev);
-       struct file *f = fd_dev->fd_file;
-       struct scatterlist *sg;
-       struct iovec *iov;
-       mm_segment_t old_fs;
-       sector_t nolb = sbc_get_write_same_sectors(cmd);
        loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
-       unsigned int len, len_tmp, iov_num;
-       int i, rc;
-       unsigned char *buf;
+       sector_t nolb = sbc_get_write_same_sectors(cmd);
+       struct iov_iter iter;
+       struct bio_vec *bvec;
+       unsigned int len = 0, i;
+       ssize_t ret;
 
        if (!nolb) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }
-       sg = &cmd->t_data_sg[0];
+       if (cmd->prot_op) {
+               pr_err("WRITE_SAME: Protection information with FILEIO"
+                      " backends not supported\n");
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
 
        if (cmd->t_data_nents > 1 ||
-           sg->length != cmd->se_dev->dev_attrib.block_size) {
+           cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
-                       " block_size: %u\n", cmd->t_data_nents, sg->length,
+                       " block_size: %u\n",
+                       cmd->t_data_nents,
+                       cmd->t_data_sg[0].length,
                        cmd->se_dev->dev_attrib.block_size);
                return TCM_INVALID_CDB_FIELD;
        }
 
-       len = len_tmp = nolb * se_dev->dev_attrib.block_size;
-       iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
-
-       buf = fd_setup_write_same_buf(cmd, sg, len);
-       if (!buf)
+       bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
+       if (!bvec)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-       iov = vzalloc(sizeof(struct iovec) * iov_num);
-       if (!iov) {
-               pr_err("Unable to allocate fd_execute_write_same iovecs\n");
-               kfree(buf);
+       for (i = 0; i < nolb; i++) {
+               bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
+               bvec[i].bv_len = cmd->t_data_sg[0].length;
+               bvec[i].bv_offset = cmd->t_data_sg[0].offset;
+
+               len += se_dev->dev_attrib.block_size;
+       }
+
+       iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
+       ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos);
+
+       kfree(bvec);
+       if (ret < 0 || ret != len) {
+               pr_err("vfs_iter_write() returned %zd for write same\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
-       /*
-        * Map the single fabric received scatterlist block now populated
-        * in *buf into each iovec for I/O submission.
-        */
-       for (i = 0; i < iov_num; i++) {
-               iov[i].iov_base = buf;
-               iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
-               len_tmp -= iov[i].iov_len;
+
+       target_complete_cmd(cmd, SAM_STAT_GOOD);
+       return 0;
+}
+
+static int
+fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
+               void *buf, size_t bufsize)
+{
+       struct fd_dev *fd_dev = FD_DEV(se_dev);
+       struct file *prot_fd = fd_dev->fd_prot_file;
+       sector_t prot_length, prot;
+       loff_t pos = lba * se_dev->prot_length;
+
+       if (!prot_fd) {
+               pr_err("Unable to locate fd_dev->fd_prot_file\n");
+               return -ENODEV;
        }
 
-       old_fs = get_fs();
-       set_fs(get_ds());
-       rc = vfs_writev(f, &iov[0], iov_num, &pos);
-       set_fs(old_fs);
+       prot_length = nolb * se_dev->prot_length;
 
-       vfree(iov);
-       kfree(buf);
+       for (prot = 0; prot < prot_length;) {
+               sector_t len = min_t(sector_t, bufsize, prot_length - prot);
+               ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
 
-       if (rc < 0 || rc != len) {
-               pr_err("vfs_writev() returned %d for write same\n", rc);
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               if (ret != len) {
+                       pr_err("vfs_write to prot file failed: %zd\n", ret);
+                       return ret < 0 ? ret : -ENODEV;
+               }
+               prot += ret;
        }
 
-       target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
 }
 
+static int
+fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
+{
+       void *buf;
+       int rc;
+
+       buf = (void *)__get_free_page(GFP_KERNEL);
+       if (!buf) {
+               pr_err("Unable to allocate FILEIO prot buf\n");
+               return -ENOMEM;
+       }
+       memset(buf, 0xff, PAGE_SIZE);
+
+       rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
+
+       free_page((unsigned long)buf);
+
+       return rc;
+}
+
 static sense_reason_t
-fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 {
-       struct file *file = priv;
+       struct file *file = FD_DEV(cmd->se_dev)->fd_file;
        struct inode *inode = file->f_mapping->host;
        int ret;
 
+       if (cmd->se_dev->dev_attrib.pi_prot_type) {
+               ret = fd_do_prot_unmap(cmd, lba, nolb);
+               if (ret)
+                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
+
        if (S_ISBLK(inode->i_mode)) {
                /* The backend is block device, use discard */
                struct block_device *bdev = inode->i_bdev;
+               struct se_device *dev = cmd->se_dev;
 
-               ret = blkdev_issue_discard(bdev, lba,
-                               nolb, GFP_KERNEL, 0);
+               ret = blkdev_issue_discard(bdev,
+                                          target_to_linux_sector(dev, lba),
+                                          target_to_linux_sector(dev, nolb),
+                                          GFP_KERNEL, 0);
                if (ret < 0) {
                        pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
                                ret);
@@ -516,68 +507,88 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
 }
 
 static sense_reason_t
-fd_execute_write_same_unmap(struct se_cmd *cmd)
+fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+             enum dma_data_direction data_direction)
 {
-       struct se_device *se_dev = cmd->se_dev;
-       struct fd_dev *fd_dev = FD_DEV(se_dev);
-       struct file *file = fd_dev->fd_file;
-       sector_t lba = cmd->t_task_lba;
-       sector_t nolb = sbc_get_write_same_sectors(cmd);
-       int ret;
-
-       if (!nolb) {
-               target_complete_cmd(cmd, SAM_STAT_GOOD);
-               return 0;
-       }
-
-       ret = fd_do_unmap(cmd, file, lba, nolb);
-       if (ret)
-               return ret;
-
-       target_complete_cmd(cmd, GOOD);
-       return 0;
-}
-
-static sense_reason_t
-fd_execute_unmap(struct se_cmd *cmd)
-{
-       struct file *file = FD_DEV(cmd->se_dev)->fd_file;
-
-       return sbc_execute_unmap(cmd, fd_do_unmap, file);
-}
-
-static sense_reason_t
-fd_execute_rw(struct se_cmd *cmd)
-{
-       struct scatterlist *sgl = cmd->t_data_sg;
-       u32 sgl_nents = cmd->t_data_nents;
-       enum dma_data_direction data_direction = cmd->data_direction;
        struct se_device *dev = cmd->se_dev;
+       struct fd_dev *fd_dev = FD_DEV(dev);
+       struct file *file = fd_dev->fd_file;
+       struct file *pfile = fd_dev->fd_prot_file;
+       sense_reason_t rc;
        int ret = 0;
-
+       /*
+        * We are currently limited by the number of iovecs (2048) per
+        * single vfs_[writev,readv] call.
+        */
+       if (cmd->data_length > FD_MAX_BYTES) {
+               pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+                      "FD_MAX_BYTES: %u iovec count limitiation\n",
+                       cmd->data_length, FD_MAX_BYTES);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
         */
        if (data_direction == DMA_FROM_DEVICE) {
-               ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
+               if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+                       ret = fd_do_rw(cmd, pfile, dev->prot_length,
+                                      cmd->t_prot_sg, cmd->t_prot_nents,
+                                      cmd->prot_length, 0);
+                       if (ret < 0)
+                               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               }
+
+               ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+                              sgl, sgl_nents, cmd->data_length, 0);
+
+               if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+                       u32 sectors = cmd->data_length >>
+                                       ilog2(dev->dev_attrib.block_size);
+
+                       rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+                                           0, cmd->t_prot_sg, 0);
+                       if (rc)
+                               return rc;
+               }
        } else {
-               ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
+               if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+                       u32 sectors = cmd->data_length >>
+                                       ilog2(dev->dev_attrib.block_size);
+
+                       rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+                                           0, cmd->t_prot_sg, 0);
+                       if (rc)
+                               return rc;
+               }
+
+               ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+                              sgl, sgl_nents, cmd->data_length, 1);
                /*
-                * Perform implict vfs_fsync_range() for fd_do_writev() ops
+                * Perform implicit vfs_fsync_range() for fd_do_writev() ops
                 * for SCSI WRITEs with Forced Unit Access (FUA) set.
                 * Allow this to happen independent of WCE=0 setting.
                 */
-               if (ret > 0 &&
-                   dev->dev_attrib.emulate_fua_write > 0 &&
-                   (cmd->se_cmd_flags & SCF_FUA)) {
-                       struct fd_dev *fd_dev = FD_DEV(dev);
+               if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
                        loff_t start = cmd->t_task_lba *
                                dev->dev_attrib.block_size;
-                       loff_t end = start + cmd->data_length;
+                       loff_t end;
+
+                       if (cmd->data_length)
+                               end = start + cmd->data_length - 1;
+                       else
+                               end = LLONG_MAX;
 
                        vfs_fsync_range(fd_dev->fd_file, start, end, 1);
                }
+
+               if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+                       ret = fd_do_rw(cmd, pfile, dev->prot_length,
+                                      cmd->t_prot_sg, cmd->t_prot_nents,
+                                      cmd->prot_length, 1);
+                       if (ret < 0)
+                               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               }
        }
 
        if (ret < 0)
@@ -635,10 +646,10 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
                                ret = -ENOMEM;
                                break;
                        }
-                       ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+                       ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
                        kfree(arg_p);
                        if (ret < 0) {
-                               pr_err("strict_strtoull() failed for"
+                               pr_err("kstrtoull() failed for"
                                                " fd_dev_size=\n");
                                goto out;
                        }
@@ -647,7 +658,9 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
                        fd_dev->fbd_flags |= FBDF_HAS_SIZE;
                        break;
                case Opt_fd_buffered_io:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        if (arg != 1) {
                                pr_err("bogus fd_buffered_io=%d value\n", arg);
                                ret = -EINVAL;
@@ -702,11 +715,86 @@ static sector_t fd_get_blocks(struct se_device *dev)
                       dev->dev_attrib.block_size);
 }
 
+static int fd_init_prot(struct se_device *dev)
+{
+       struct fd_dev *fd_dev = FD_DEV(dev);
+       struct file *prot_file, *file = fd_dev->fd_file;
+       struct inode *inode;
+       int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+       char buf[FD_MAX_DEV_PROT_NAME];
+
+       if (!file) {
+               pr_err("Unable to locate fd_dev->fd_file\n");
+               return -ENODEV;
+       }
+
+       inode = file->f_mapping->host;
+       if (S_ISBLK(inode->i_mode)) {
+               pr_err("FILEIO Protection emulation only supported on"
+                      " !S_ISBLK\n");
+               return -ENOSYS;
+       }
+
+       if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
+               flags &= ~O_DSYNC;
+
+       snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
+                fd_dev->fd_dev_name);
+
+       prot_file = filp_open(buf, flags, 0600);
+       if (IS_ERR(prot_file)) {
+               pr_err("filp_open(%s) failed\n", buf);
+               ret = PTR_ERR(prot_file);
+               return ret;
+       }
+       fd_dev->fd_prot_file = prot_file;
+
+       return 0;
+}
+
+static int fd_format_prot(struct se_device *dev)
+{
+       unsigned char *buf;
+       int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+       int ret;
+
+       if (!dev->dev_attrib.pi_prot_type) {
+               pr_err("Unable to format_prot while pi_prot_type == 0\n");
+               return -ENODEV;
+       }
+
+       buf = vzalloc(unit_size);
+       if (!buf) {
+               pr_err("Unable to allocate FILEIO prot buf\n");
+               return -ENOMEM;
+       }
+
+       pr_debug("Using FILEIO prot_length: %llu\n",
+                (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
+                                       dev->prot_length);
+
+       memset(buf, 0xff, unit_size);
+       ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
+                             buf, unit_size);
+       vfree(buf);
+       return ret;
+}
+
+static void fd_free_prot(struct se_device *dev)
+{
+       struct fd_dev *fd_dev = FD_DEV(dev);
+
+       if (!fd_dev->fd_prot_file)
+               return;
+
+       filp_close(fd_dev->fd_prot_file, NULL);
+       fd_dev->fd_prot_file = NULL;
+}
+
 static struct sbc_ops fd_sbc_ops = {
        .execute_rw             = fd_execute_rw,
        .execute_sync_cache     = fd_execute_sync_cache,
        .execute_write_same     = fd_execute_write_same,
-       .execute_write_same_unmap = fd_execute_write_same_unmap,
        .execute_unmap          = fd_execute_unmap,
 };
 
@@ -716,12 +804,11 @@ fd_parse_cdb(struct se_cmd *cmd)
        return sbc_parse_cdb(cmd, &fd_sbc_ops);
 }
 
-static struct se_subsystem_api fileio_template = {
+static const struct target_backend_ops fileio_ops = {
        .name                   = "fileio",
        .inquiry_prod           = "FILEIO",
        .inquiry_rev            = FD_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = fd_attach_hba,
        .detach_hba             = fd_detach_hba,
        .alloc_device           = fd_alloc_device,
@@ -732,16 +819,20 @@ static struct se_subsystem_api fileio_template = {
        .show_configfs_dev_params = fd_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = fd_get_blocks,
+       .init_prot              = fd_init_prot,
+       .format_prot            = fd_format_prot,
+       .free_prot              = fd_free_prot,
+       .tb_dev_attrib_attrs    = sbc_attrib_attrs,
 };
 
 static int __init fileio_module_init(void)
 {
-       return transport_subsystem_register(&fileio_template);
+       return transport_backend_register(&fileio_ops);
 }
 
 static void __exit fileio_module_exit(void)
 {
-       transport_subsystem_release(&fileio_template);
+       target_backend_unregister(&fileio_ops);
 }
 
 MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");