/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <misc/cxl.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release: Source structure from which to translate/copy.
 * @resize:  Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
                                   struct dk_cxlflash_resize *resize)
{
        resize->hdr = release->hdr;
        resize->context_id = release->context_id;
        resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:  Source structure from which to translate/copy.
 * @release: Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
                                struct dk_cxlflash_release *release)
{
        release->hdr = detach->hdr;
        release->context_id = detach->context_id;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
        mutex_lock(&global.mutex);
        if (global.err_page) {
                __free_page(global.err_page);
                global.err_page = NULL;
        }
        mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg: Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state, which will notify the user and let them 'drive' the tear-down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int i, found;

        cxlflash_mark_contexts_error(cfg);

        while (true) {
                found = false;

                for (i = 0; i < MAX_CONTEXT; i++)
                        if (cfg->ctx_tbl[i]) {
                                found = true;
                                break;
                        }

                if (!found && list_empty(&cfg->ctx_err_recovery))
                        return;

                dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
                        __func__);
                wake_up_all(&cfg->limbo_waitq);
                ssleep(1);
        }
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:    Internal structure associated with the host.
 * @rctxid: Desired context by id.
 * @file:   Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
                                           struct file *file)
{
        struct ctx_info *ctxi;

        list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
                if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
                        return ctxi;

        return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:      Internal structure associated with the host.
 * @rctxid:   Desired context (raw, un-decoded format).
 * @arg:      LUN information or file associated with request.
 * @ctx_ctrl: Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multithreaded. The tgid remains constant for the process and only changes
 * when the process forks. For all intents and purposes, think of tgid
 * as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
                             void *arg, enum ctx_ctrl ctx_ctrl)
{
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct lun_access *lun_access = NULL;
        struct file *file = NULL;
        struct llun_info *lli = arg;
        u64 ctxid = DECODE_CTXID(rctxid);
        int rc;
        pid_t pid = current->tgid, ctxpid = 0;

        if (ctx_ctrl & CTX_CTRL_FILE) {
                lli = NULL;
                file = (struct file *)arg;
        }

        if (ctx_ctrl & CTX_CTRL_CLONE)
                pid = current->parent->tgid;

        if (likely(ctxid < MAX_CONTEXT)) {
                while (true) {
                        rc = mutex_lock_interruptible(&cfg->ctx_tbl_list_mutex);
                        if (rc)
                                goto out;

                        ctxi = cfg->ctx_tbl[ctxid];
                        if (ctxi)
                                if ((file && (ctxi->file != file)) ||
                                    (!file && (ctxi->ctxid != rctxid)))
                                        ctxi = NULL;

                        if ((ctx_ctrl & CTX_CTRL_ERR) ||
                            (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
                                ctxi = find_error_context(cfg, rctxid, file);
                        if (!ctxi) {
                                mutex_unlock(&cfg->ctx_tbl_list_mutex);
                                goto out;
                        }

                        /*
                         * Need to acquire ownership of the context while still
                         * under the table/list lock to serialize with a remove
                         * thread. Use the 'try' to avoid stalling the
                         * table/list lock for a single context.
                         *
                         * Note that the lock order is:
                         *
                         *      cfg->ctx_tbl_list_mutex -> ctxi->mutex
                         *
                         * Therefore release ctx_tbl_list_mutex before retrying.
                         */
                        rc = mutex_trylock(&ctxi->mutex);
                        mutex_unlock(&cfg->ctx_tbl_list_mutex);
                        if (rc)
                                break; /* got the context's lock! */
                }

                if (ctxi->unavail)
                        goto denied;

                ctxpid = ctxi->pid;
                if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
                        if (pid != ctxpid)
                                goto denied;

                if (lli) {
                        list_for_each_entry(lun_access, &ctxi->luns, list)
                                if (lun_access->lli == lli)
                                        goto out;
                        goto denied;
                }
        }

out:
        dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
                "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
                ctx_ctrl);

        return ctxi;

denied:
        mutex_unlock(&ctxi->mutex);
        ctxi = NULL;
        goto out;
}
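
/*
 * A minimal usage sketch of the lookup/ownership protocol above
 * (illustrative only, not part of the driver; assumes the caller
 * already holds a raw context id and an llun_info pointer):
 *
 *      ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
 *      if (unlikely(!ctxi))
 *              return -EINVAL;
 *      ... operate on resources owned by ctxi, ctxi->mutex held ...
 *      put_context(ctxi);
 */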

/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi: Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
        mutex_unlock(&ctxi->mutex);
}

/**
 * afu_attach() - attach a context to the AFU
 * @cfg:  Internal structure associated with the host.
 * @ctxi: Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct sisl_ctrl_map *ctrl_map = ctxi->ctrl_map;
        int rc = 0;
        u64 val;

        /* Unlock cap and restrict user to read/write cmds in translated mode */
        readq_be(&ctrl_map->mbox_r);
        val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
        writeq_be(val, &ctrl_map->ctx_cap);
        val = readq_be(&ctrl_map->ctx_cap);
        if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
                dev_err(dev, "%s: ctx may be closed val=%016llX\n",
                        __func__, val);
                rc = -EAGAIN;
                goto out;
        }

        /* Set up MMIO registers pointing to the RHT */
        writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
        val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
        writeq_be(val, &ctrl_map->rht_cnt_id);
out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}
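
/*
 * The capability write above is only trusted after it is read back:
 * a context that was closed while the mailbox was unlocked will not
 * retain the READ/WRITE capability bits, which is how the -EAGAIN
 * path detects the race. The same write-then-verify pattern would
 * apply to any register the AFU can revoke asynchronously.
 */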

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev: SCSI device associated with LUN.
 * @lli:  LUN destined for capacity request.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct glun_info *gli = lli->parent;
        u8 *cmd_buf = NULL;
        u8 *scsi_cmd = NULL;
        u8 *sense_buf = NULL;
        int rc = 0;
        int result = 0;
        int retry_cnt = 0;
        u32 tout = (MC_DISCOVERY_TIMEOUT * HZ);

retry:
        cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
        scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
        sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
        if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
                rc = -ENOMEM;
                goto out;
        }

        scsi_cmd[0] = SERVICE_ACTION_IN_16;     /* read cap(16) */
        scsi_cmd[1] = SAI_READ_CAPACITY_16;     /* service action */
        put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

        dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
                retry_cnt ? "re" : "", scsi_cmd[0]);

        result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
                              CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);

        if (driver_byte(result) == DRIVER_SENSE) {
                result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
                if (result & SAM_STAT_CHECK_CONDITION) {
                        struct scsi_sense_hdr sshdr;

                        scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
                                             &sshdr);
                        switch (sshdr.sense_key) {
                        case NO_SENSE:
                        case RECOVERED_ERROR:
                                /* fall through */
                        case NOT_READY:
                                result &= ~SAM_STAT_CHECK_CONDITION;
                                break;
                        case UNIT_ATTENTION:
                                switch (sshdr.asc) {
                                case 0x29: /* Power on Reset or Device Reset */
                                        /* fall through */
                                case 0x2A: /* Device capacity changed */
                                case 0x3F: /* Report LUNs changed */
                                        /* Retry the command once more */
                                        if (retry_cnt++ < 1) {
                                                kfree(cmd_buf);
                                                kfree(scsi_cmd);
                                                kfree(sense_buf);
                                                goto retry;
                                        }
                                }
                                break;
                        default:
                                break;
                        }
                }
        }

        if (result) {
                dev_err(dev, "%s: command failed, result=0x%x\n",
                        __func__, result);
                rc = -EIO;
                goto out;
        }

        /*
         * Read cap was successful, grab values from the buffer;
         * note that we don't need to worry about unaligned access
         * as the buffer is allocated on an aligned boundary.
         */
        mutex_lock(&gli->mutex);
        gli->max_lba = be64_to_cpu(*((u64 *)&cmd_buf[0]));
        gli->blk_len = be32_to_cpu(*((u32 *)&cmd_buf[8]));
        mutex_unlock(&gli->mutex);

out:
        kfree(cmd_buf);
        kfree(scsi_cmd);
        kfree(sense_buf);

        dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
                __func__, gli->max_lba, gli->blk_len, rc);
        return rc;
}
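
/*
 * For reference, the READ CAPACITY(16) parameter data parsed above is
 * laid out per the SCSI SBC specification: bytes 0-7 hold the returned
 * (last) logical block address and bytes 8-11 hold the logical block
 * length in bytes, both big-endian; hence the be64/be32 conversions at
 * offsets 0 and 8 of cmd_buf.
 */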

/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:  Context owning the resource handle.
 * @rhndl: Resource handle associated with entry.
 * @lli:   LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
                                struct llun_info *lli)
{
        struct sisl_rht_entry *rhte = NULL;

        if (unlikely(!ctxi->rht_start)) {
                pr_debug("%s: Context does not have allocated RHT!\n",
                         __func__);
                goto out;
        }

        if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
                pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
                goto out;
        }

        if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
                pr_debug("%s: Bad resource handle LUN! (%d)\n",
                         __func__, rhndl);
                goto out;
        }

        rhte = &ctxi->rht_start[rhndl];
        if (unlikely(rhte->nmask == 0)) {
                pr_debug("%s: Unopened resource handle! (%d)\n",
                         __func__, rhndl);
                rhte = NULL;
        }

out:
        return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi: Context owning the resource handle.
 * @lli:  LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
                                     struct llun_info *lli)
{
        struct sisl_rht_entry *rhte = NULL;
        int i;

        /* Find a free RHT entry */
        for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
                if (ctxi->rht_start[i].nmask == 0) {
                        rhte = &ctxi->rht_start[i];
                        ctxi->rht_out++;
                        break;
                }

        if (likely(rhte))
                ctxi->rht_lun[i] = lli;

        pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
        return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi: Context owning the resource handle.
 * @rhte: RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
                  struct sisl_rht_entry *rhte)
{
        u32 rsrc_handle = rhte - ctxi->rht_start;

        rhte->nmask = 0;
        rhte->fp = 0;
        ctxi->rht_out--;
        ctxi->rht_lun[rsrc_handle] = NULL;
        ctxi->rht_needs_ws[rsrc_handle] = false;
}
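
/*
 * Sketch of the expected checkout/checkin pairing (illustrative only):
 *
 *      rhte = rhte_checkout(ctxi, lli);        // claim a free entry
 *      if (unlikely(!rhte))
 *              return -EMFILE;
 *      ... program and use the entry ...
 *      rhte_checkin(ctxi, rhte);               // return it to the pool
 */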

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:     RHTE to populate.
 * @lun_id:   LUN ID of LUN associated with RHTE.
 * @perm:     Desired permissions for RHTE.
 * @port_sel: Port selection mask
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
                        u32 port_sel)
{
        /*
         * Populate the Format 1 RHT entry for direct access (physical
         * LUN) using the synchronization sequence defined in the
         * SISLite specification.
         */
        struct sisl_rht_entry_f1 dummy = { 0 };
        struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

        memset(rhte_f1, 0, sizeof(*rhte_f1));
        rhte_f1->fp = SISL_RHT_FP(1U, 0);
        dma_wmb(); /* Make setting of format bit visible */

        rhte_f1->lun_id = lun_id;
        dma_wmb(); /* Make setting of LUN id visible */

        /*
         * Use a dummy RHT Format 1 entry to build the second dword
         * of the entry that must be populated in a single write when
         * enabled (valid bit set to TRUE).
         */
        dummy.valid = 0x80;
        dummy.fp = SISL_RHT_FP(1U, perm);
        dummy.port_sel = port_sel;
        rhte_f1->dw = dummy.dw;

        dma_wmb(); /* Make remaining RHT entry fields visible */
}
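
/*
 * The sequence above is an ordered publish pattern: each dma_wmb()
 * orders the prior store ahead of the next so the AFU never observes
 * a valid entry with a stale LUN id or permissions, and the dword
 * carrying the valid bit is committed in a single store built from
 * the dummy entry.
 */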

/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:    LUN to attach.
 * @mode:   Desired mode of the LUN.
 * @locked: Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
        int rc = 0;

        if (!locked)
                mutex_lock(&gli->mutex);

        if (gli->mode == MODE_NONE)
                gli->mode = mode;
        else if (gli->mode != mode) {
                pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
                         __func__, gli->mode, mode);
                rc = -EINVAL;
                goto out;
        }

        gli->users++;
        WARN_ON(gli->users <= 0);
out:
        pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
                 __func__, rc, gli->mode, gli->users);
        if (!locked)
                mutex_unlock(&gli->mutex);
        return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli: LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
        mutex_lock(&gli->mutex);
        WARN_ON(gli->mode == MODE_NONE);
        if (--gli->users == 0) {
                gli->mode = MODE_NONE;
                cxlflash_ba_terminate(&gli->blka.ba_lun);
        }
        pr_debug("%s: gli->users=%u\n", __func__, gli->users);
        WARN_ON(gli->users < 0);
        mutex_unlock(&gli->mutex);
}
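
/*
 * Mode attach/detach sketch (illustrative only): each successful
 * cxlflash_lun_attach() must be balanced by a cxlflash_lun_detach(),
 * including on error unwind after the mode was claimed:
 *
 *      rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
 *      if (unlikely(rc))
 *              goto out;
 *      ...
 *      cxlflash_lun_detach(gli);       // error unwind or release
 */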

/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:    SCSI device associated with LUN.
 * @ctxi:    Context owning resources.
 * @release: Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
                           struct ctx_info *ctxi,
                           struct dk_cxlflash_release *release)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct afu *afu = cfg->afu;
        bool put_ctx = false;

        struct dk_cxlflash_resize size;
        res_hndl_t rhndl = release->rsrc_handle;

        int rc = 0;
        u64 ctxid = DECODE_CTXID(release->context_id),
            rctxid = release->context_id;

        struct sisl_rht_entry *rhte;
        struct sisl_rht_entry_f1 *rhte_f1;

        dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
                __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context! (%llu)\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
                dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
                        __func__, rhndl);
                rc = -EINVAL;
                goto out;
        }

        /*
         * Resize to 0 for virtual LUNS by setting the size
         * to 0. This will clear LXT_START and LXT_CNT fields
         * in the RHT entry and properly sync with the AFU.
         *
         * Afterwards we clear the remaining fields.
         */
        switch (gli->mode) {
        case MODE_VIRTUAL:
                marshal_rele_to_resize(release, &size);
                size.req_size = 0;
                rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
                if (rc) {
                        dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
                        goto out;
                }

                break;
        case MODE_PHYSICAL:
                /*
                 * Clear the Format 1 RHT entry for direct access
                 * (physical LUN) using the synchronization sequence
                 * defined in the SISLite specification.
                 */
                rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

                rhte_f1->valid = 0;
                dma_wmb(); /* Make revocation of RHT entry visible */

                rhte_f1->lun_id = 0;
                dma_wmb(); /* Make clearing of LUN id visible */

                rhte_f1->dw = 0;
                dma_wmb(); /* Make RHT entry bottom-half clearing visible */

                if (!ctxi->err_recovery_active)
                        cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
                break;
        default:
                WARN(1, "Unsupported LUN mode!");
                goto out;
        }

        rhte_checkin(ctxi, rhte);
        cxlflash_lun_detach(gli);

out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
                          struct dk_cxlflash_release *release)
{
        return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg:  Internal structure associated with the host.
 * @ctxi: Context to release.
 *
 * Note that the rht_lun and rht_needs_ws tables, along with the RHT page
 * itself, are explicitly freed here. Also note that we conditionally
 * check for the existence of the context control map before clearing the
 * RHT registers and context capabilities because it is possible to destroy
 * a context while the context is in the error state (previous mapping was
 * removed [so we don't have to worry about clearing] and the context is
 * waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
                            struct ctx_info *ctxi)
{
        struct afu *afu = cfg->afu;

        WARN_ON(!list_empty(&ctxi->luns));

        /* Clear RHT registers and drop all capabilities for this context */
        if (afu->afu_map && ctxi->ctrl_map) {
                writeq_be(0, &ctxi->ctrl_map->rht_start);
                writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
                writeq_be(0, &ctxi->ctrl_map->ctx_cap);
        }

        /* Free memory associated with context */
        free_page((ulong)ctxi->rht_start);
        kfree(ctxi->rht_needs_ws);
        kfree(ctxi->rht_lun);
        kfree(ctxi);
        atomic_dec_if_positive(&cfg->num_user_contexts);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:     Internal structure associated with the host.
 * @ctx:     Previously obtained CXL context reference.
 * @ctxid:   Previously obtained process element associated with CXL context.
 * @adap_fd: Previously obtained adapter fd associated with CXL context.
 * @file:    Previously obtained file associated with CXL context.
 * @perms:   User-specified permissions.
 *
 * The context's mutex is locked when an allocated context is returned.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
                                       struct cxl_context *ctx, int ctxid,
                                       int adap_fd, struct file *file,
                                       u32 perms)
{
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct ctx_info *ctxi = NULL;
        struct llun_info **lli = NULL;
        bool *ws = NULL;
        struct sisl_rht_entry *rhte;

        ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
        lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
        ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
        if (unlikely(!ctxi || !lli || !ws)) {
                dev_err(dev, "%s: Unable to allocate context!\n", __func__);
                goto err;
        }

        rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
        if (unlikely(!rhte)) {
                dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
                goto err;
        }

        ctxi->rht_lun = lli;
        ctxi->rht_needs_ws = ws;
        ctxi->rht_start = rhte;
        ctxi->rht_perms = perms;

        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
        ctxi->lfd = adap_fd;
        ctxi->pid = current->tgid; /* tgid = pid */
        ctxi->ctx = ctx;
        ctxi->file = file;
        mutex_init(&ctxi->mutex);
        INIT_LIST_HEAD(&ctxi->luns);
        INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */

        atomic_inc(&cfg->num_user_contexts);
        mutex_lock(&ctxi->mutex);
out:
        return ctxi;

err:
        kfree(ws);
        kfree(lli);
        kfree(ctxi);
        ctxi = NULL;
        goto out;
}
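
/*
 * Note the pairing with destroy_context(): the context is returned
 * here with ctxi->mutex held, so error paths that must unwind (such
 * as the attach path below) drop the mutex via put_context() before
 * handing the context to destroy_context().
 */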

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:   SCSI device associated with LUN.
 * @ctxi:   Context owning resources.
 * @detach: Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
                                 struct ctx_info *ctxi,
                                 struct dk_cxlflash_detach *detach)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct lun_access *lun_access, *t;
        struct dk_cxlflash_release rel;
        bool put_ctx = false;

        int i;
        int rc = 0;
        int lfd;
        u64 ctxid = DECODE_CTXID(detach->context_id),
            rctxid = detach->context_id;

        dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context! (%llu)\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        /* Cleanup outstanding resources tied to this LUN */
        if (ctxi->rht_out) {
                marshal_det_to_rele(detach, &rel);
                for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
                        if (ctxi->rht_lun[i] == lli) {
                                rel.rsrc_handle = i;
                                _cxlflash_disk_release(sdev, ctxi, &rel);
                        }

                        /* No need to loop further if we're done */
                        if (ctxi->rht_out == 0)
                                break;
                }
        }

        /* Take our LUN out of context, free the node */
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                if (lun_access->lli == lli) {
                        list_del(&lun_access->list);
                        kfree(lun_access);
                        lun_access = NULL;
                        break;
                }

        /* Tear down context following last LUN cleanup */
        if (list_empty(&ctxi->luns)) {
                ctxi->unavail = true;
                mutex_unlock(&ctxi->mutex);
                mutex_lock(&cfg->ctx_tbl_list_mutex);
                mutex_lock(&ctxi->mutex);

                /* Might not have been in error list so conditionally remove */
                if (!list_empty(&ctxi->list))
                        list_del(&ctxi->list);
                cfg->ctx_tbl[ctxid] = NULL;
                mutex_unlock(&cfg->ctx_tbl_list_mutex);
                mutex_unlock(&ctxi->mutex);

                lfd = ctxi->lfd;
                destroy_context(cfg, ctxi);
                ctxi = NULL;
                put_ctx = false;

                /*
                 * As a last step, clean up external resources when not
                 * already on an external cleanup thread, i.e.: close(adap_fd).
                 *
                 * NOTE: this will free up the context from the CXL services,
                 * allowing it to dole out the same context_id on a future
                 * (or even currently in-flight) disk_attach operation.
                 */
                if (lfd != -1)
                        sys_close(lfd);
        }

out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
                                struct dk_cxlflash_detach *detach)
{
        return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode: File-system inode associated with fd.
 * @file:  File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close is performed on the adapter file descriptor returned
 * to the user. Programmatically, the user is not required to perform
 * the close, as it is handled internally via the detach ioctl when
 * a context is being removed. Note that nothing prevents the user
 * from performing a close, but the user should be aware that doing
 * so is considered catastrophic and subsequent usage of the superpipe
 * API with previously saved off tokens will fail.
 *
 * When initiated from an external close (either by the user or via
 * a process tear down), the routine derives the context reference
 * and calls detach for each LUN associated with the context. The
 * final detach operation will cause the context itself to be freed.
 * Note that the saved off lfd is reset prior to calling detach to
 * signify that the final detach should not perform a close.
 *
 * When initiated from a detach operation as part of the tear down
 * of a context, the context is first completely freed and then the
 * close is performed. This routine will fail to derive the context
 * reference (due to the context having already been freed) and then
 * call into the CXL release entry point.
 *
 * Thus, with exception to when the CXL process element (context id)
 * lookup fails (a case that should theoretically never occur), every
 * call into this routine results in a complete freeing of a context.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
        struct cxl_context *ctx = cxl_fops_get_context(file);
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct dk_cxlflash_detach detach = { { 0 }, 0 };
        struct lun_access *lun_access, *t;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;

        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed! (%d)\n",
                        __func__, ctx, ctxid);
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
                if (!ctxi) {
                        dev_dbg(dev, "%s: Context %d already free!\n",
                                __func__, ctxid);
                        goto out_release;
                }

                dev_dbg(dev, "%s: Another process owns context %d!\n",
                        __func__, ctxid);
                put_context(ctxi);
                goto out;
        }

        dev_dbg(dev, "%s: close(%d) for context %d\n",
                __func__, ctxi->lfd, ctxid);

        /* Reset the file descriptor to indicate we're on a close() thread */
        ctxi->lfd = -1;
        detach.context_id = ctxi->ctxid;
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
        cxl_fd_release(inode, file);
out:
        dev_dbg(dev, "%s: returning\n", __func__);
        return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi: Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * services).
 */
static void unmap_context(struct ctx_info *ctxi)
{
        unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}

/**
 * get_err_page() - obtains and allocates the error notification page
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(void)
{
        struct page *err_page = global.err_page;

        if (unlikely(!err_page)) {
                err_page = alloc_page(GFP_KERNEL);
                if (unlikely(!err_page)) {
                        pr_err("%s: Unable to allocate err_page!\n", __func__);
                        goto out;
                }

                memset(page_address(err_page), -1, PAGE_SIZE);

                /* Serialize update w/ other threads to avoid a leak */
                mutex_lock(&global.mutex);
                if (likely(!global.err_page))
                        global.err_page = err_page;
                else {
                        __free_page(err_page);
                        err_page = global.err_page;
                }

                mutex_unlock(&global.mutex);
        }

out:
        pr_debug("%s: returning err_page=%p\n", __func__, err_page);
        return err_page;
}
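
/*
 * The routine above is the classic check/alloc/re-check pattern: the
 * page is allocated outside the global mutex, and the second check
 * under the mutex decides whether to publish it or free the duplicate
 * when another thread won the race. The cost is at most one wasted
 * allocation, never a leak.
 */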

/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vma: VM area associated with mapping.
 * @vmf: VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct cxl_context *ctx = cxl_fops_get_context(file);
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct page *err_page = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int rc = 0;
        int ctxid;

        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed! (%d)\n",
                        __func__, ctx, ctxid);
                goto err;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
                goto err;
        }

        dev_dbg(dev, "%s: fault(%d) for context %d\n",
                __func__, ctxi->lfd, ctxid);

        if (likely(!ctxi->err_recovery_active)) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
        } else {
                dev_dbg(dev, "%s: err recovery active, use err_page!\n",
                        __func__);

                err_page = get_err_page();
                if (unlikely(!err_page)) {
                        dev_err(dev, "%s: Could not obtain error page!\n",
                                __func__);
                        rc = VM_FAULT_RETRY;
                        goto out;
                }

                get_page(err_page);
                vmf->page = err_page;
                vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;

err:
        rc = VM_FAULT_SIGBUS;
        goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
        .fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file: File installed with adapter file descriptor.
 * @vma:  VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct cxl_context *ctx = cxl_fops_get_context(file);
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;
        int rc = 0;

        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed! (%d)\n",
                        __func__, ctx, ctxid);
                rc = -EIO;
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
                rc = -EIO;
                goto out;
        }

        dev_dbg(dev, "%s: mmap(%d) for context %d\n",
                __func__, ctxi->lfd, ctxid);

        rc = cxl_fd_mmap(file, vma);
        if (likely(!rc)) {
                /* Insert ourself in the mmap fault handler path */
                ctxi->cxl_mmap_vmops = vma->vm_ops;
                vma->vm_ops = &cxlflash_mmap_vmops;
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        return rc;
}
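
/*
 * Fault interposition sketch: only the vm_ops pointer is redirected
 * here, while the original CXL vmops are saved in the context. During
 * normal operation cxlflash_mmap_fault() simply delegates to
 * ctxi->cxl_mmap_vmops->fault(), and during error recovery it serves
 * the all-1's error page instead.
 */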

/*
 * Local fops for adapter file descriptor
 */
static const struct file_operations cxlflash_cxl_fops = {
        .owner = THIS_MODULE,
        .mmap = cxlflash_cxl_mmap,
        .release = cxlflash_cxl_release,
};

/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg: Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
        int i, rc = 0;
        struct ctx_info *ctxi = NULL;

        mutex_lock(&cfg->ctx_tbl_list_mutex);

        for (i = 0; i < MAX_CONTEXT; i++) {
                ctxi = cfg->ctx_tbl[i];
                if (ctxi) {
                        mutex_lock(&ctxi->mutex);
                        cfg->ctx_tbl[i] = NULL;
                        list_add(&ctxi->list, &cfg->ctx_err_recovery);
                        ctxi->err_recovery_active = true;
                        ctxi->ctrl_map = NULL;
                        unmap_context(ctxi);
                        mutex_unlock(&ctxi->mutex);
                }
        }

        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        return rc;
}

static const struct file_operations null_fops = {
        .owner = THIS_MODULE,
};

/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:   SCSI device associated with LUN.
 * @attach: Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
                                struct dk_cxlflash_attach *attach)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct cxl_ioctl_start_work *work;
        struct ctx_info *ctxi = NULL;
        struct lun_access *lun_access = NULL;
        int rc = 0;
        u32 perms;
        int ctxid = -1;
        u64 rctxid = 0UL;
        struct file *file;

        struct cxl_context *ctx;

        int fd = -1;

        /* On first attach set fileops */
        if (atomic_read(&cfg->num_user_contexts) == 0)
                cfg->cxl_fops = cxlflash_cxl_fops;

        if (attach->num_interrupts > 4) {
                dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
                        __func__, attach->num_interrupts);
                rc = -EINVAL;
                goto out;
        }

        if (gli->max_lba == 0) {
                dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
                        __func__, lli->lun_id[sdev->channel]);
                rc = read_cap16(sdev, lli);
                if (rc) {
                        dev_err(dev, "%s: Invalid device! (%d)\n",
                                __func__, rc);
                        rc = -ENODEV;
                        goto out;
                }
                dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
                dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
        }

        if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
                rctxid = attach->context_id;
                ctxi = get_context(cfg, rctxid, NULL, 0);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context! (%016llX)\n",
                                __func__, rctxid);
                        rc = -EINVAL;
                        goto out;
                }

                list_for_each_entry(lun_access, &ctxi->luns, list)
                        if (lun_access->lli == lli) {
                                dev_dbg(dev, "%s: Already attached!\n",
                                        __func__);
                                rc = -EINVAL;
                                goto out;
                        }
        }

        lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
        if (unlikely(!lun_access)) {
                dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
                rc = -ENOMEM;
                goto out;
        }

        lun_access->lli = lli;
        lun_access->sdev = sdev;

        /* Non-NULL context indicates reuse */
        if (ctxi) {
                dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
                        __func__, rctxid);
                list_add(&lun_access->list, &ctxi->luns);
                fd = ctxi->lfd;
                goto out_attach;
        }

        ctx = cxl_dev_context_init(cfg->dev);
        if (unlikely(IS_ERR_OR_NULL(ctx))) {
                dev_err(dev, "%s: Could not initialize context %p\n",
                        __func__, ctx);
                rc = -ENODEV;
                goto err0;
        }

        ctxid = cxl_process_element(ctx);
        if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
                dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
                rc = -EPERM;
                goto err1;
        }

        file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
        if (unlikely(fd < 0)) {
                rc = -ENODEV;
                dev_err(dev, "%s: Could not get file descriptor\n", __func__);
                goto err1;
        }

        /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
        perms = SISL_RHT_PERM(attach->hdr.flags + 1);

        ctxi = create_context(cfg, ctx, ctxid, fd, file, perms);
        if (unlikely(!ctxi)) {
                dev_err(dev, "%s: Failed to create context! (%d)\n",
                        __func__, ctxid);
                rc = -ENODEV;
                goto err2;
        }

        work = &ctxi->work;
        work->num_interrupts = attach->num_interrupts;
        work->flags = CXL_START_WORK_NUM_IRQS;

        rc = cxl_start_work(ctx, work);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: Could not start context rc=%d\n",
                        __func__, rc);
                goto err3;
        }

        rc = afu_attach(cfg, ctxi);
        if (unlikely(rc)) {
                dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
                goto err4;
        }

        /*
         * No error paths after this point. Once the fd is installed it's
         * visible to user space and can't be undone safely on this thread.
         * There is no need to worry about a deadlock here because no one
         * knows about us yet; we can be the only one holding our mutex.
         */
        list_add(&lun_access->list, &ctxi->luns);
        mutex_unlock(&ctxi->mutex);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);
        cfg->ctx_tbl[ctxid] = ctxi;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        fd_install(fd, file);

out_attach:
        attach->hdr.return_flags = 0;
        attach->context_id = ctxi->ctxid;
        attach->block_size = gli->blk_len;
        attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
        attach->last_lba = gli->max_lba;
        attach->max_xfer = (sdev->host->max_sectors * 512) / gli->blk_len;

        attach->adap_fd = fd;

        if (ctxi)
                put_context(ctxi);

out:
        dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
                __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
        return rc;

err4:
        cxl_stop_context(ctx);
err3:
        put_context(ctxi);
        destroy_context(cfg, ctxi);
        ctxi = NULL;
err2:
        /*
         * Here, we're overriding the fops with a dummy all-NULL fops because
         * fput() calls the release fop, which will cause us to mistakenly
         * call into the CXL code. Rather than try to add yet more complexity
         * to that routine (cxlflash_cxl_release) we should try to fix the
         * issue here.
         */
        file->f_op = &null_fops;
        fput(file);
        put_unused_fd(fd);
        fd = -1;
err1:
        cxl_release_context(ctx);
err0:
        kfree(lun_access);
        goto out;
}
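
/*
 * A hedged userspace sketch of driving the attach ioctl (illustrative
 * only; the device node path and flag choice are assumptions, field
 * names are from cxlflash_ioctl.h):
 *
 *      struct dk_cxlflash_attach attach = { 0 };
 *      int fd = open("/dev/sdN", O_RDWR);      // hypothetical disk node
 *
 *      attach.hdr.version = DK_CXLFLASH_VERSION_0;
 *      attach.num_interrupts = 4;
 *      if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) == 0) {
 *              // attach.context_id, attach.adap_fd and attach.last_lba
 *              // are now valid for superpipe use
 *      }
 */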

/**
 * recover_context() - recovers a context in error
 * @cfg:  Internal structure associated with the host.
 * @ctxi: Context to recover.
 *
 * Re-establishes the state for a context-in-error.
 *
 * Return: 0 on success, -errno on failure
 */
static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
        struct device *dev = &cfg->dev->dev;
        int rc = 0;
        int old_fd, fd = -1;
        int ctxid = -1;
        struct file *file;
        struct cxl_context *ctx;
        struct afu *afu = cfg->afu;

        ctx = cxl_dev_context_init(cfg->dev);
        if (unlikely(IS_ERR_OR_NULL(ctx))) {
                dev_err(dev, "%s: Could not initialize context %p\n",
                        __func__, ctx);
                rc = -ENODEV;
                goto out;
        }

        ctxid = cxl_process_element(ctx);
        if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
                dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
                rc = -EPERM;
                goto err1;
        }

        file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
        if (unlikely(fd < 0)) {
                rc = -ENODEV;
                dev_err(dev, "%s: Could not get file descriptor\n", __func__);
                goto err1;
        }

        rc = cxl_start_work(ctx, &ctxi->work);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: Could not start context rc=%d\n",
                        __func__, rc);
                goto err2;
        }

        /* Update with new MMIO area based on updated context id */
        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

        rc = afu_attach(cfg, ctxi);
        if (rc) {
                dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
                goto err3;
        }

        /*
         * No error paths after this point. Once the fd is installed it's
         * visible to user space and can't be undone safely on this thread.
         */
        old_fd = ctxi->lfd;
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
        ctxi->lfd = fd;
        ctxi->ctx = ctx;
        ctxi->file = file;

        /*
         * Put context back in table (note the reinit of the context list);
         * we must first drop the context's mutex and then acquire it in
         * order with the table/list mutex to avoid a deadlock - safe to do
         * here because no one can find us at this moment in time.
         */
        mutex_unlock(&ctxi->mutex);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);
        list_del_init(&ctxi->list);
        cfg->ctx_tbl[ctxid] = ctxi;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        fd_install(fd, file);

        /* Release the original adapter fd and associated CXL resources */
        sys_close(old_fd);
out:
        dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
                __func__, ctxid, fd, rc);
        return rc;

err3:
        cxl_stop_context(ctx);
err2:
        fput(file);
        put_unused_fd(fd);
err1:
        cxl_release_context(ctx);
        goto out;
}

/**
 * check_state() - checks and responds to the current adapter state
 * @cfg: Internal structure associated with the host.
 *
 * This routine can block and should only be used in process context.
 * Note that when waking up from waiting in limbo, the state is unknown
 * and must be checked again before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
static int check_state(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

retry:
        switch (cfg->state) {
        case STATE_LIMBO:
                dev_dbg(dev, "%s: Limbo, going to wait...\n", __func__);
                rc = wait_event_interruptible(cfg->limbo_waitq,
                                              cfg->state != STATE_LIMBO);
                if (unlikely(rc))
                        break;
                goto retry;
        case STATE_FAILTERM:
                dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
                rc = -ENODEV;
                break;
        default:
                break;
        }

        return rc;
}

/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:    SCSI device associated with LUN.
 * @recover: Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit. For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to keep up.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'serviced'
 * (callback invoked, causing the device to enter limbo state). To avoid
 * looping in this routine during that window, a 1 second sleep is in place
 * between the time the MMIO failure is detected and the time a wait on the
 * limbo wait queue is attempted via check_state().
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_recover(struct scsi_device *sdev,
                                struct dk_cxlflash_recover_afu *recover)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct afu *afu = cfg->afu;
        struct ctx_info *ctxi = NULL;
        struct mutex *mutex = &cfg->ctx_recovery_mutex;
        u64 ctxid = DECODE_CTXID(recover->context_id),
            rctxid = recover->context_id;
        long reg;
        int lretry = 20; /* up to 2 seconds */
        int rc = 0;

        atomic_inc(&cfg->recovery_threads);
        rc = mutex_lock_interruptible(mutex);
        if (rc)
                goto out;

        dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
                __func__, recover->reason, rctxid);

retry:
        /* Ensure that this process is attached to the context */
        ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
                rc = -EINVAL;
                goto out;
        }

        if (ctxi->err_recovery_active) {
retry_recover:
                rc = recover_context(cfg, ctxi);
                if (unlikely(rc)) {
                        dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
                                __func__, ctxid, rc);
                        if ((rc == -ENODEV) &&
                            ((atomic_read(&cfg->recovery_threads) > 1) ||
                             (lretry--))) {
                                dev_dbg(dev, "%s: Going to try again!\n",
                                        __func__);
                                mutex_unlock(mutex);
                                msleep(100);
                                rc = mutex_lock_interruptible(mutex);
                                if (rc)
                                        goto out;
                                goto retry_recover;
                        }

                        goto out;
                }

                ctxi->err_recovery_active = false;
                recover->context_id = ctxi->ctxid;
                recover->adap_fd = ctxi->lfd;
                recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
                recover->hdr.return_flags |=
                        DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
                goto out;
        }

        /* Test if in error state */
        reg = readq_be(&afu->ctrl_map->mbox_r);
        if (reg == -1) {
                dev_dbg(dev, "%s: MMIO read fail! Wait for recovery...\n",
                        __func__);
                mutex_unlock(&ctxi->mutex);
                ctxi = NULL;
                ssleep(1);
                rc = check_state(cfg);
                if (unlikely(rc))
                        goto out;
                goto retry;
        }

        dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
out:
        if (likely(ctxi))
                put_context(ctxi);
        mutex_unlock(mutex);
        atomic_dec_if_positive(&cfg->recovery_threads);
        return rc;
}

/**
 * process_sense() - evaluates and processes sense data
 * @sdev:   SCSI device associated with LUN.
 * @verify: Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int process_sense(struct scsi_device *sdev,
                         struct dk_cxlflash_verify *verify)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        u64 prev_lba = gli->max_lba;
        struct scsi_sense_hdr sshdr = { 0 };
        int rc = 0;

        rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
                                  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
        if (!rc) {
                dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
                rc = -EINVAL;
                goto out;
        }

        switch (sshdr.sense_key) {
        case NO_SENSE:
        case RECOVERED_ERROR:
                /* fall through */
        case NOT_READY:
                break;
        case UNIT_ATTENTION:
                switch (sshdr.asc) {
                case 0x29: /* Power on Reset or Device Reset */
                        /* fall through */
                case 0x2A: /* Device settings/capacity changed */
                        rc = read_cap16(sdev, lli);
                        if (rc) {
                                rc = -ENODEV;
                                break;
                        }
                        if (prev_lba != gli->max_lba)
                                dev_dbg(dev, "%s: Capacity changed old=%lld "
                                        "new=%lld\n", __func__, prev_lba,
                                        gli->max_lba);
                        break;
                case 0x3F: /* Report LUNs changed, Rescan. */
                        scsi_scan_host(cfg->host);
                        break;
                default:
                        rc = -EIO;
                        break;
                }
                break;
        default:
                rc = -EIO;
                break;
        }
out:
        dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
                sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
        return rc;
}

/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handles size changes
 * @sdev:   SCSI device associated with LUN.
 * @verify: Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
                                struct dk_cxlflash_verify *verify)
{
        int rc = 0;
        struct ctx_info *ctxi = NULL;
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct sisl_rht_entry *rhte = NULL;
        res_hndl_t rhndl = verify->rsrc_handle;
        u64 ctxid = DECODE_CTXID(verify->context_id),
            rctxid = verify->context_id;
        u64 last_lba = 0;

        dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
                "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
                verify->hint, verify->hdr.flags);

        ctxi = get_context(cfg, rctxid, lli, 0);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
                rc = -EINVAL;
                goto out;
        }

        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
                dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
                        __func__, rhndl);
                rc = -EINVAL;
                goto out;
        }

        /*
         * Look at the hint/sense to see if it requires us to redrive
         * inquiry (i.e. the Unit attention is due to the WWN changing).
         */
        if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
                rc = process_sense(sdev, verify);
                if (unlikely(rc)) {
                        dev_err(dev, "%s: Failed to validate sense data (%d)\n",
                                __func__, rc);
                        goto out;
                }
        }

        switch (gli->mode) {
        case MODE_PHYSICAL:
                last_lba = gli->max_lba;
                break;
        case MODE_VIRTUAL:
                /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
                last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
                last_lba /= CXLFLASH_BLOCK_SIZE;
                last_lba--;
                break;
        default:
                WARN(1, "Unsupported LUN mode!");
        }

        verify->last_lba = last_lba;

out:
        if (likely(ctxi))
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
                __func__, rc, verify->last_lba);
        return rc;
}
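
/*
 * Worked example of the virtual-mode math above (symbolic values,
 * assuming for illustration MC_CHUNK_SIZE = 256 and 4K blocks): with
 * lxt_cnt = 2 and blk_len = 4096, the virtual LUN spans
 * 2 * 256 * 4096 bytes; dividing by CXLFLASH_BLOCK_SIZE (4096) gives
 * 512 blocks, so last_lba = 512 - 1 = 511.
 */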

/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd: The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */
static char *decode_ioctl(int cmd)
{
        switch (cmd) {
        case DK_CXLFLASH_ATTACH:
                return __stringify_1(DK_CXLFLASH_ATTACH);
        case DK_CXLFLASH_USER_DIRECT:
                return __stringify_1(DK_CXLFLASH_USER_DIRECT);
        case DK_CXLFLASH_USER_VIRTUAL:
                return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
        case DK_CXLFLASH_VLUN_RESIZE:
                return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
        case DK_CXLFLASH_RELEASE:
                return __stringify_1(DK_CXLFLASH_RELEASE);
        case DK_CXLFLASH_DETACH:
                return __stringify_1(DK_CXLFLASH_DETACH);
        case DK_CXLFLASH_VERIFY:
                return __stringify_1(DK_CXLFLASH_VERIFY);
        case DK_CXLFLASH_VLUN_CLONE:
                return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
        case DK_CXLFLASH_RECOVER_AFU:
                return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
        case DK_CXLFLASH_MANAGE_LUN:
                return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
        }

        return "UNKNOWN";
}

/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev: SCSI device associated with LUN.
 * @arg:  UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct LUN and the size (in blocks) of
 * the direct LUN in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;

        struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

        u64 ctxid = DECODE_CTXID(pphys->context_id),
            rctxid = pphys->context_id;
        u64 lun_size = 0;
        u64 last_lba = 0;
        u64 rsrc_handle = -1;
        u32 port = CHAN2PORT(sdev->channel);

        int rc = 0;

        struct ctx_info *ctxi = NULL;
        struct sisl_rht_entry *rhte = NULL;

        pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);

        rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
                        __func__);
                goto out;
        }

        ctxi = get_context(cfg, rctxid, lli, 0);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
                rc = -EINVAL;
                goto err1;
        }

        rhte = rhte_checkout(ctxi, lli);
        if (unlikely(!rhte)) {
                dev_dbg(dev, "%s: too many opens for this context\n", __func__);
                rc = -EMFILE;   /* too many opens */
                goto err1;
        }

        rsrc_handle = (rhte - ctxi->rht_start);

        rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
        cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);

        last_lba = gli->max_lba;
        pphys->hdr.return_flags = 0;
        pphys->last_lba = last_lba;
        pphys->rsrc_handle = rsrc_handle;

out:
        if (likely(ctxi))
                put_context(ctxi);
        dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
                __func__, rsrc_handle, rc, last_lba);
        return rc;

err1:
        cxlflash_lun_detach(gli);
        goto out;
}

/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd:  IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup-oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        int rc = 0;

        if (unlikely(!lli)) {
                dev_dbg(dev, "%s: Unknown LUN\n", __func__);
                rc = -EINVAL;
                goto out;
        }

        rc = check_state(cfg);
        if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
                switch (cmd) {
                case DK_CXLFLASH_VLUN_RESIZE:
                case DK_CXLFLASH_RELEASE:
                case DK_CXLFLASH_DETACH:
                        dev_dbg(dev, "%s: Command override! (%d)\n",
                                __func__, rc);
                        rc = 0;
                        break;
                }
        }
out:
        return rc;
}

/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd:  IOCTL command.
 * @arg:  Userspace ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
        typedef int (*sioctl) (struct scsi_device *, void *);

        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct dk_cxlflash_hdr *hdr;
        char buf[sizeof(union cxlflash_ioctls)];
        size_t size = 0;
        bool known_ioctl = false;
        int idx;
        int rc = 0;
        struct Scsi_Host *shost = sdev->host;
        sioctl do_ioctl = NULL;

        static const struct {
                size_t size;
                sioctl ioctl;
        } ioctl_tbl[] = {       /* NOTE: order matters here */
        {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
        {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
        {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
        {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
        {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
        {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
        {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
        {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
        {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
        {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
        };
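
        /*
         * The table order above mirrors the ioctl command numbering so
         * that the lookup below can index by
         * _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH); e.g.
         * DK_CXLFLASH_ATTACH maps to slot 0 and each subsequent command
         * to the next slot. Reordering either the table or the command
         * definitions in cxlflash_ioctl.h would silently break this
         * mapping.
         */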

        /* Restrict command set to physical support only for internal LUN */
        if (afu->internal_lun)
                switch (cmd) {
                case DK_CXLFLASH_RELEASE:
                case DK_CXLFLASH_USER_VIRTUAL:
                case DK_CXLFLASH_VLUN_RESIZE:
                case DK_CXLFLASH_VLUN_CLONE:
                        dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
                                __func__, decode_ioctl(cmd), afu->internal_lun);
                        rc = -EINVAL;
                        goto cxlflash_ioctl_exit;
                }

        switch (cmd) {
        case DK_CXLFLASH_ATTACH:
        case DK_CXLFLASH_USER_DIRECT:
        case DK_CXLFLASH_RELEASE:
        case DK_CXLFLASH_DETACH:
        case DK_CXLFLASH_VERIFY:
        case DK_CXLFLASH_RECOVER_AFU:
        case DK_CXLFLASH_USER_VIRTUAL:
        case DK_CXLFLASH_VLUN_RESIZE:
        case DK_CXLFLASH_VLUN_CLONE:
                dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
                        __func__, decode_ioctl(cmd), cmd, shost->host_no,
                        sdev->channel, sdev->id, sdev->lun);
                rc = ioctl_common(sdev, cmd);
                if (unlikely(rc))
                        goto cxlflash_ioctl_exit;

                /* fall through */

        case DK_CXLFLASH_MANAGE_LUN:
                known_ioctl = true;
                idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
                size = ioctl_tbl[idx].size;
                do_ioctl = ioctl_tbl[idx].ioctl;

                if (likely(do_ioctl))
                        break;

                /* fall through */
        default:
                rc = -EINVAL;
                goto cxlflash_ioctl_exit;
        }

        if (unlikely(copy_from_user(&buf, arg, size))) {
                dev_err(dev, "%s: copy_from_user() fail! "
                        "size=%lu cmd=%d (%s) arg=%p\n",
                        __func__, size, cmd, decode_ioctl(cmd), arg);
                rc = -EFAULT;
                goto cxlflash_ioctl_exit;
        }

        hdr = (struct dk_cxlflash_hdr *)&buf;
        if (hdr->version != DK_CXLFLASH_VERSION_0) {
                dev_dbg(dev, "%s: Version %u not supported for %s\n",
                        __func__, hdr->version, decode_ioctl(cmd));
                rc = -EINVAL;
                goto cxlflash_ioctl_exit;
        }

        if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
                dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
                rc = -EINVAL;
                goto cxlflash_ioctl_exit;
        }

        rc = do_ioctl(sdev, (void *)&buf);
        if (likely(!rc))
                if (unlikely(copy_to_user(arg, &buf, size))) {
                        dev_err(dev, "%s: copy_to_user() fail! "
                                "size=%lu cmd=%d (%s) arg=%p\n",
                                __func__, size, cmd, decode_ioctl(cmd), arg);
                        rc = -EFAULT;
                }

        /* fall through to exit */

cxlflash_ioctl_exit:
        if (unlikely(rc && known_ioctl))
                dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
                        "returned rc %d\n", __func__,
                        decode_ioctl(cmd), cmd, shost->host_no,
                        sdev->channel, sdev->id, sdev->lun, rc);
        else
                dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
                        "returned rc %d\n", __func__, decode_ioctl(cmd),
                        cmd, shost->host_no, sdev->channel, sdev->id,
                        sdev->lun, rc);

        return rc;
}