2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * Maintained at www.Open-FCoE.org
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class in regards to adding and deleting rports.
28 * fc_rport's represent N_Port's within the fabric.
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time, since they're not critical for the I/O
44 * path this potential over-use of the mutex is acceptable.
47 #include <linux/kernel.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/rcupdate.h>
51 #include <linux/timer.h>
52 #include <linux/workqueue.h>
53 #include <asm/unaligned.h>
55 #include <scsi/libfc.h>
56 #include <scsi/fc_encode.h>
58 struct workqueue_struct *rport_event_queue;
60 static void fc_rport_enter_plogi(struct fc_rport_priv *);
61 static void fc_rport_enter_prli(struct fc_rport_priv *);
62 static void fc_rport_enter_rtv(struct fc_rport_priv *);
63 static void fc_rport_enter_ready(struct fc_rport_priv *);
64 static void fc_rport_enter_logo(struct fc_rport_priv *);
66 static void fc_rport_recv_plogi_req(struct fc_rport_priv *,
67 struct fc_seq *, struct fc_frame *);
68 static void fc_rport_recv_prli_req(struct fc_rport_priv *,
69 struct fc_seq *, struct fc_frame *);
70 static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
71 struct fc_seq *, struct fc_frame *);
72 static void fc_rport_recv_logo_req(struct fc_rport_priv *,
73 struct fc_seq *, struct fc_frame *);
74 static void fc_rport_timeout(struct work_struct *);
75 static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
76 static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
77 static void fc_rport_work(struct work_struct *);
79 static const char *fc_rport_state_names[] = {
80 [RPORT_ST_INIT] = "Init",
81 [RPORT_ST_PLOGI] = "PLOGI",
82 [RPORT_ST_PRLI] = "PRLI",
83 [RPORT_ST_RTV] = "RTV",
84 [RPORT_ST_READY] = "Ready",
85 [RPORT_ST_LOGO] = "LOGO",
86 [RPORT_ST_DELETE] = "Delete",
90 * fc_rport_create() - create remote port in INIT state.
92 * @ids: remote port identifiers.
94 * Locking note: this may be called without locks held, but
95 * is usually called from discovery with the disc_mutex held.
97 static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
98 struct fc_rport_identifiers *ids)
100 struct fc_rport_priv *rdata;
102 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
107 kref_init(&rdata->kref);
108 mutex_init(&rdata->rp_mutex);
109 rdata->local_port = lport;
110 rdata->rp_state = RPORT_ST_INIT;
111 rdata->event = RPORT_EV_NONE;
112 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
113 rdata->e_d_tov = lport->e_d_tov;
114 rdata->r_a_tov = lport->r_a_tov;
115 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
116 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
117 INIT_WORK(&rdata->event_work, fc_rport_work);
122 * fc_rport_destroy() - free a remote port after last reference is released.
123 * @kref: pointer to kref inside struct fc_rport_priv
125 static void fc_rport_destroy(struct kref *kref)
127 struct fc_rport_priv *rdata;
129 rdata = container_of(kref, struct fc_rport_priv, kref);
134 * fc_rport_state() - return a string for the state the rport is in
135 * @rdata: remote port private data
137 static const char *fc_rport_state(struct fc_rport_priv *rdata)
141 cp = fc_rport_state_names[rdata->rp_state];
148 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
149 * @rport: Pointer to Fibre Channel remote port structure
150 * @timeout: timeout in seconds
152 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
155 rport->dev_loss_tmo = timeout + 5;
157 rport->dev_loss_tmo = 30;
159 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
162 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
163 * @flp: FLOGI payload structure
164 * @maxval: upper limit, may be less than what is in the service parameters
166 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
172 * Get max payload from the common service parameters and the
173 * class 3 receive data field size.
175 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
176 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
178 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
179 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
185 * fc_rport_state_enter() - Change the rport's state
186 * @rdata: The rport whose state should change
187 * @new: The new state of the rport
189 * Locking Note: Called with the rport lock held
191 static void fc_rport_state_enter(struct fc_rport_priv *rdata,
192 enum fc_rport_state new)
194 if (rdata->rp_state != new)
196 rdata->rp_state = new;
199 static void fc_rport_work(struct work_struct *work)
202 struct fc_rport_priv *rdata =
203 container_of(work, struct fc_rport_priv, event_work);
204 struct fc_rport_libfc_priv *rp;
205 enum fc_rport_event event;
206 struct fc_lport *lport = rdata->local_port;
207 struct fc_rport_operations *rport_ops;
208 struct fc_rport_identifiers ids;
209 struct fc_rport *rport;
211 mutex_lock(&rdata->rp_mutex);
212 event = rdata->event;
213 rport_ops = rdata->ops;
214 rport = rdata->rport;
216 FC_RPORT_DBG(rdata, "work event %u\n", event);
221 rdata->event = RPORT_EV_NONE;
222 kref_get(&rdata->kref);
223 mutex_unlock(&rdata->rp_mutex);
226 rport = fc_remote_port_add(lport->host, 0, &ids);
228 FC_RPORT_DBG(rdata, "Failed to add the rport\n");
229 lport->tt.rport_logoff(rdata);
230 kref_put(&rdata->kref, lport->tt.rport_destroy);
233 mutex_lock(&rdata->rp_mutex);
235 FC_RPORT_DBG(rdata, "rport already allocated\n");
236 rdata->rport = rport;
237 rport->maxframe_size = rdata->maxframe_size;
238 rport->supported_classes = rdata->supported_classes;
241 rp->local_port = lport;
242 rp->rp_state = rdata->rp_state;
243 rp->flags = rdata->flags;
244 rp->e_d_tov = rdata->e_d_tov;
245 rp->r_a_tov = rdata->r_a_tov;
246 mutex_unlock(&rdata->rp_mutex);
248 if (rport_ops->event_callback) {
249 FC_RPORT_DBG(rdata, "callback ev %d\n", event);
250 rport_ops->event_callback(lport, rdata, event);
252 kref_put(&rdata->kref, lport->tt.rport_destroy);
255 case RPORT_EV_FAILED:
258 port_id = rdata->ids.port_id;
259 mutex_unlock(&rdata->rp_mutex);
261 if (rport_ops->event_callback) {
262 FC_RPORT_DBG(rdata, "callback ev %d\n", event);
263 rport_ops->event_callback(lport, rdata, event);
265 cancel_delayed_work_sync(&rdata->retry_work);
268 * Reset any outstanding exchanges before freeing rport.
270 lport->tt.exch_mgr_reset(lport, 0, port_id);
271 lport->tt.exch_mgr_reset(lport, port_id, 0);
275 rp->rp_state = RPORT_ST_DELETE;
276 mutex_lock(&rdata->rp_mutex);
278 mutex_unlock(&rdata->rp_mutex);
279 fc_remote_port_delete(rport);
281 kref_put(&rdata->kref, lport->tt.rport_destroy);
285 mutex_unlock(&rdata->rp_mutex);
291 * fc_rport_login() - Start the remote port login state machine
292 * @rdata: private remote port
294 * Locking Note: Called without the rport lock held. This
295 * function will hold the rport lock, call an _enter_*
296 * function and then unlock the rport.
298 int fc_rport_login(struct fc_rport_priv *rdata)
300 mutex_lock(&rdata->rp_mutex);
302 FC_RPORT_DBG(rdata, "Login to port\n");
304 fc_rport_enter_plogi(rdata);
306 mutex_unlock(&rdata->rp_mutex);
312 * fc_rport_enter_delete() - schedule a remote port to be deleted.
313 * @rdata: private remote port
314 * @event: event to report as the reason for deletion
316 * Locking Note: Called with the rport lock held.
318 * Allow state change into DELETE only once.
320 * Call queue_work only if there's no event already pending.
321 * Set the new event so that the old pending event will not occur.
322 * Since we have the mutex, even if fc_rport_work() is already started,
323 * it'll see the new event.
325 static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
326 enum fc_rport_event event)
328 if (rdata->rp_state == RPORT_ST_DELETE)
331 FC_RPORT_DBG(rdata, "Delete port\n");
333 fc_rport_state_enter(rdata, RPORT_ST_DELETE);
335 if (rdata->event == RPORT_EV_NONE)
336 queue_work(rport_event_queue, &rdata->event_work);
337 rdata->event = event;
341 * fc_rport_logoff() - Logoff and remove an rport
342 * @rdata: private remote port
344 * Locking Note: Called without the rport lock held. This
345 * function will hold the rport lock, call an _enter_*
346 * function and then unlock the rport.
348 int fc_rport_logoff(struct fc_rport_priv *rdata)
350 mutex_lock(&rdata->rp_mutex);
352 FC_RPORT_DBG(rdata, "Remove port\n");
354 if (rdata->rp_state == RPORT_ST_DELETE) {
355 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
356 mutex_unlock(&rdata->rp_mutex);
360 fc_rport_enter_logo(rdata);
363 * Change the state to Delete so that we discard
366 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
367 mutex_unlock(&rdata->rp_mutex);
374 * fc_rport_enter_ready() - The rport is ready
375 * @rdata: private remote port
377 * Locking Note: The rport lock is expected to be held before calling
380 static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
382 fc_rport_state_enter(rdata, RPORT_ST_READY);
384 FC_RPORT_DBG(rdata, "Port is Ready\n");
386 if (rdata->event == RPORT_EV_NONE)
387 queue_work(rport_event_queue, &rdata->event_work);
388 rdata->event = RPORT_EV_READY;
392 * fc_rport_timeout() - Handler for the retry_work timer.
393 * @work: The work struct of the fc_rport_priv
395 * Locking Note: Called without the rport lock held. This
396 * function will hold the rport lock, call an _enter_*
397 * function and then unlock the rport.
399 static void fc_rport_timeout(struct work_struct *work)
401 struct fc_rport_priv *rdata =
402 container_of(work, struct fc_rport_priv, retry_work.work);
404 mutex_lock(&rdata->rp_mutex);
406 switch (rdata->rp_state) {
408 fc_rport_enter_plogi(rdata);
411 fc_rport_enter_prli(rdata);
414 fc_rport_enter_rtv(rdata);
417 fc_rport_enter_logo(rdata);
421 case RPORT_ST_DELETE:
425 mutex_unlock(&rdata->rp_mutex);
429 * fc_rport_error() - Error handler, called once retries have been exhausted
430 * @rdata: private remote port
431 * @fp: The frame pointer
433 * Locking Note: The rport lock is expected to be held before
434 * calling this routine
436 static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
438 FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
439 PTR_ERR(fp), fc_rport_state(rdata), rdata->retries);
441 switch (rdata->rp_state) {
445 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
448 fc_rport_enter_ready(rdata);
450 case RPORT_ST_DELETE:
458 * fc_rport_error_retry() - Error handler when retries are desired
459 * @rdata: private remote port data
460 * @fp: The frame pointer
462 * If the error was an exchange timeout retry immediately,
463 * otherwise wait for E_D_TOV.
465 * Locking Note: The rport lock is expected to be held before
466 * calling this routine
468 static void fc_rport_error_retry(struct fc_rport_priv *rdata,
471 unsigned long delay = FC_DEF_E_D_TOV;
473 /* make sure this isn't an FC_EX_CLOSED error, never retry those */
474 if (PTR_ERR(fp) == -FC_EX_CLOSED)
475 return fc_rport_error(rdata, fp);
477 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
478 FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
479 PTR_ERR(fp), fc_rport_state(rdata));
481 /* no additional delay on exchange timeouts */
482 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
484 schedule_delayed_work(&rdata->retry_work, delay);
488 return fc_rport_error(rdata, fp);
492 * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response
493 * @sp: current sequence in the PLOGI exchange
494 * @fp: response frame
495 * @rdata_arg: private remote port data
497 * Locking Note: This function will be called without the rport lock
498 * held, but it will lock, call an _enter_* function or fc_rport_error
499 * and then unlock the rport.
501 static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
504 struct fc_rport_priv *rdata = rdata_arg;
505 struct fc_lport *lport = rdata->local_port;
506 struct fc_els_flogi *plp = NULL;
512 mutex_lock(&rdata->rp_mutex);
514 FC_RPORT_DBG(rdata, "Received a PLOGI response\n");
516 if (rdata->rp_state != RPORT_ST_PLOGI) {
517 FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
518 "%s\n", fc_rport_state(rdata));
525 fc_rport_error_retry(rdata, fp);
529 op = fc_frame_payload_op(fp);
530 if (op == ELS_LS_ACC &&
531 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
532 rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
533 rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
535 tov = ntohl(plp->fl_csp.sp_e_d_tov);
536 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
538 if (tov > rdata->e_d_tov)
539 rdata->e_d_tov = tov;
540 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
541 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
542 if (cssp_seq < csp_seq)
544 rdata->max_seq = csp_seq;
545 rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
548 * If the rport is one of the well known addresses
549 * we skip PRLI and RTV and go straight to READY.
551 if (rdata->ids.port_id >= FC_FID_DOM_MGR)
552 fc_rport_enter_ready(rdata);
554 fc_rport_enter_prli(rdata);
556 fc_rport_error_retry(rdata, fp);
561 mutex_unlock(&rdata->rp_mutex);
562 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
566 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
567 * @rdata: private remote port data
569 * Locking Note: The rport lock is expected to be held before calling
572 static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
574 struct fc_lport *lport = rdata->local_port;
577 FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
578 fc_rport_state(rdata));
580 fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
582 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
583 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
585 fc_rport_error_retry(rdata, fp);
588 rdata->e_d_tov = lport->e_d_tov;
590 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
591 fc_rport_plogi_resp, rdata, lport->e_d_tov))
592 fc_rport_error_retry(rdata, fp);
594 kref_get(&rdata->kref);
598 * fc_rport_prli_resp() - Process Login (PRLI) response handler
599 * @sp: current sequence in the PRLI exchange
600 * @fp: response frame
601 * @rdata_arg: private remote port data
603 * Locking Note: This function will be called without the rport lock
604 * held, but it will lock, call an _enter_* function or fc_rport_error
605 * and then unlock the rport.
607 static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
610 struct fc_rport_priv *rdata = rdata_arg;
612 struct fc_els_prli prli;
613 struct fc_els_spp spp;
615 u32 roles = FC_RPORT_ROLE_UNKNOWN;
619 mutex_lock(&rdata->rp_mutex);
621 FC_RPORT_DBG(rdata, "Received a PRLI response\n");
623 if (rdata->rp_state != RPORT_ST_PRLI) {
624 FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
625 "%s\n", fc_rport_state(rdata));
632 fc_rport_error_retry(rdata, fp);
636 op = fc_frame_payload_op(fp);
637 if (op == ELS_LS_ACC) {
638 pp = fc_frame_payload_get(fp, sizeof(*pp));
639 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
640 fcp_parm = ntohl(pp->spp.spp_params);
641 if (fcp_parm & FCP_SPPF_RETRY)
642 rdata->flags |= FC_RP_FLAGS_RETRY;
645 rdata->supported_classes = FC_COS_CLASS3;
646 if (fcp_parm & FCP_SPPF_INIT_FCN)
647 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
648 if (fcp_parm & FCP_SPPF_TARG_FCN)
649 roles |= FC_RPORT_ROLE_FCP_TARGET;
651 rdata->ids.roles = roles;
652 fc_rport_enter_rtv(rdata);
655 FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
656 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
662 mutex_unlock(&rdata->rp_mutex);
663 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
667 * fc_rport_logo_resp() - Logout (LOGO) response handler
668 * @sp: current sequence in the LOGO exchange
669 * @fp: response frame
670 * @rdata_arg: private remote port data
672 * Locking Note: This function will be called without the rport lock
673 * held, but it will lock, call an _enter_* function or fc_rport_error
674 * and then unlock the rport.
676 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
679 struct fc_rport_priv *rdata = rdata_arg;
682 mutex_lock(&rdata->rp_mutex);
684 FC_RPORT_DBG(rdata, "Received a LOGO response\n");
686 if (rdata->rp_state != RPORT_ST_LOGO) {
687 FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
688 "%s\n", fc_rport_state(rdata));
695 fc_rport_error_retry(rdata, fp);
699 op = fc_frame_payload_op(fp);
700 if (op == ELS_LS_ACC) {
701 fc_rport_enter_rtv(rdata);
703 FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n");
704 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
710 mutex_unlock(&rdata->rp_mutex);
711 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
715 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
716 * @rdata: private remote port data
718 * Locking Note: The rport lock is expected to be held before calling
721 static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
723 struct fc_lport *lport = rdata->local_port;
725 struct fc_els_prli prli;
726 struct fc_els_spp spp;
730 FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
731 fc_rport_state(rdata));
733 fc_rport_state_enter(rdata, RPORT_ST_PRLI);
735 fp = fc_frame_alloc(lport, sizeof(*pp));
737 fc_rport_error_retry(rdata, fp);
741 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
742 fc_rport_prli_resp, rdata, lport->e_d_tov))
743 fc_rport_error_retry(rdata, fp);
745 kref_get(&rdata->kref);
749 * fc_rport_els_rtv_resp() - Request Timeout Value response handler
750 * @sp: current sequence in the RTV exchange
751 * @fp: response frame
752 * @rdata_arg: private remote port data
754 * Many targets don't seem to support this.
756 * Locking Note: This function will be called without the rport lock
757 * held, but it will lock, call an _enter_* function or fc_rport_error
758 * and then unlock the rport.
760 static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
763 struct fc_rport_priv *rdata = rdata_arg;
766 mutex_lock(&rdata->rp_mutex);
768 FC_RPORT_DBG(rdata, "Received a RTV response\n");
770 if (rdata->rp_state != RPORT_ST_RTV) {
771 FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
772 "%s\n", fc_rport_state(rdata));
779 fc_rport_error(rdata, fp);
783 op = fc_frame_payload_op(fp);
784 if (op == ELS_LS_ACC) {
785 struct fc_els_rtv_acc *rtv;
789 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
791 toq = ntohl(rtv->rtv_toq);
792 tov = ntohl(rtv->rtv_r_a_tov);
795 rdata->r_a_tov = tov;
796 tov = ntohl(rtv->rtv_e_d_tov);
797 if (toq & FC_ELS_RTV_EDRES)
801 rdata->e_d_tov = tov;
805 fc_rport_enter_ready(rdata);
810 mutex_unlock(&rdata->rp_mutex);
811 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
815 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
816 * @rdata: private remote port data
818 * Locking Note: The rport lock is expected to be held before calling
821 static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
824 struct fc_lport *lport = rdata->local_port;
826 FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
827 fc_rport_state(rdata));
829 fc_rport_state_enter(rdata, RPORT_ST_RTV);
831 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
833 fc_rport_error_retry(rdata, fp);
837 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
838 fc_rport_rtv_resp, rdata, lport->e_d_tov))
839 fc_rport_error_retry(rdata, fp);
841 kref_get(&rdata->kref);
845 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
846 * @rdata: private remote port data
848 * Locking Note: The rport lock is expected to be held before calling
851 static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
853 struct fc_lport *lport = rdata->local_port;
856 FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
857 fc_rport_state(rdata));
859 fc_rport_state_enter(rdata, RPORT_ST_LOGO);
861 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
863 fc_rport_error_retry(rdata, fp);
867 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
868 fc_rport_logo_resp, rdata, lport->e_d_tov))
869 fc_rport_error_retry(rdata, fp);
871 kref_get(&rdata->kref);
876 * fc_rport_recv_req() - Receive a request from a rport
877 * @sp: current sequence in the PLOGI exchange
878 * @fp: response frame
879 * @rdata_arg: private remote port data
881 * Locking Note: Called without the rport lock held. This
882 * function will hold the rport lock, call an _enter_*
883 * function and then unlock the rport.
885 void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
886 struct fc_rport_priv *rdata)
888 struct fc_lport *lport = rdata->local_port;
890 struct fc_frame_header *fh;
891 struct fc_seq_els_data els_data;
894 mutex_lock(&rdata->rp_mutex);
897 els_data.explan = ELS_EXPL_NONE;
898 els_data.reason = ELS_RJT_NONE;
900 fh = fc_frame_header_get(fp);
902 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
903 op = fc_frame_payload_op(fp);
906 fc_rport_recv_plogi_req(rdata, sp, fp);
909 fc_rport_recv_prli_req(rdata, sp, fp);
912 fc_rport_recv_prlo_req(rdata, sp, fp);
915 fc_rport_recv_logo_req(rdata, sp, fp);
919 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
923 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
926 els_data.reason = ELS_RJT_UNSUP;
927 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
932 mutex_unlock(&rdata->rp_mutex);
936 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
937 * @rdata: private remote port data
938 * @sp: current sequence in the PLOGI exchange
939 * @fp: PLOGI request frame
941 * Locking Note: The rport lock is exected to be held before calling
944 static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata,
945 struct fc_seq *sp, struct fc_frame *rx_fp)
947 struct fc_lport *lport = rdata->local_port;
948 struct fc_frame *fp = rx_fp;
950 struct fc_frame_header *fh;
951 struct fc_els_flogi *pl;
952 struct fc_seq_els_data rjt_data;
956 enum fc_els_rjt_reason reject = 0;
960 fh = fc_frame_header_get(fp);
962 FC_RPORT_DBG(rdata, "Received PLOGI request while in state %s\n",
963 fc_rport_state(rdata));
965 sid = ntoh24(fh->fh_s_id);
966 pl = fc_frame_payload_get(fp, sizeof(*pl));
968 FC_RPORT_DBG(rdata, "Received PLOGI too short\n");
970 /* XXX TBD: send reject? */
974 wwpn = get_unaligned_be64(&pl->fl_wwpn);
975 wwnn = get_unaligned_be64(&pl->fl_wwnn);
978 * If the session was just created, possibly due to the incoming PLOGI,
979 * set the state appropriately and accept the PLOGI.
981 * If we had also sent a PLOGI, and if the received PLOGI is from a
982 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
983 * "command already in progress".
985 * XXX TBD: If the session was ready before, the PLOGI should result in
986 * all outstanding exchanges being reset.
988 switch (rdata->rp_state) {
990 FC_RPORT_DBG(rdata, "Received PLOGI, wwpn %llx state INIT "
991 "- reject\n", (unsigned long long)wwpn);
992 reject = ELS_RJT_UNSUP;
995 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state %d\n",
997 if (wwpn < lport->wwpn)
998 reject = ELS_RJT_INPROG;
1001 case RPORT_ST_READY:
1002 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
1003 "- ignored for now\n", rdata->rp_state);
1004 /* XXX TBD - should reset */
1006 case RPORT_ST_DELETE:
1008 FC_RPORT_DBG(rdata, "Received PLOGI in unexpected "
1009 "state %d\n", rdata->rp_state);
1016 rjt_data.reason = reject;
1017 rjt_data.explan = ELS_EXPL_NONE;
1018 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1021 fp = fc_frame_alloc(lport, sizeof(*pl));
1024 rjt_data.reason = ELS_RJT_UNAB;
1025 rjt_data.explan = ELS_EXPL_NONE;
1026 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1029 sp = lport->tt.seq_start_next(sp);
1031 rdata->ids.port_name = wwpn;
1032 rdata->ids.node_name = wwnn;
1035 * Get session payload size from incoming PLOGI.
1037 rdata->maxframe_size =
1038 fc_plogi_get_maxframe(pl, lport->mfs);
1039 fc_frame_free(rx_fp);
1040 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1043 * Send LS_ACC. If this fails,
1044 * the originator should retry.
1046 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1047 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1048 ep = fc_seq_exch(sp);
1049 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1050 FC_TYPE_ELS, f_ctl, 0);
1051 lport->tt.seq_send(lport, sp, fp);
1052 if (rdata->rp_state == RPORT_ST_PLOGI)
1053 fc_rport_enter_prli(rdata);
1059 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1060 * @rdata: private remote port data
1061 * @sp: current sequence in the PRLI exchange
1062 * @fp: PRLI request frame
1064 * Locking Note: The rport lock is exected to be held before calling
1067 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1068 struct fc_seq *sp, struct fc_frame *rx_fp)
1070 struct fc_lport *lport = rdata->local_port;
1072 struct fc_frame *fp;
1073 struct fc_frame_header *fh;
1075 struct fc_els_prli prli;
1076 struct fc_els_spp spp;
1078 struct fc_els_spp *rspp; /* request service param page */
1079 struct fc_els_spp *spp; /* response spp */
1082 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1083 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1084 enum fc_els_spp_resp resp;
1085 struct fc_seq_els_data rjt_data;
1088 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1091 fh = fc_frame_header_get(rx_fp);
1093 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1094 fc_rport_state(rdata));
1096 switch (rdata->rp_state) {
1098 case RPORT_ST_READY:
1099 reason = ELS_RJT_NONE;
1102 fc_frame_free(rx_fp);
1106 len = fr_len(rx_fp) - sizeof(*fh);
1107 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1109 reason = ELS_RJT_PROT;
1110 explan = ELS_EXPL_INV_LEN;
1112 plen = ntohs(pp->prli.prli_len);
1113 if ((plen % 4) != 0 || plen > len) {
1114 reason = ELS_RJT_PROT;
1115 explan = ELS_EXPL_INV_LEN;
1116 } else if (plen < len) {
1119 plen = pp->prli.prli_spp_len;
1120 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1121 plen > len || len < sizeof(*pp)) {
1122 reason = ELS_RJT_PROT;
1123 explan = ELS_EXPL_INV_LEN;
1127 if (reason != ELS_RJT_NONE ||
1128 (fp = fc_frame_alloc(lport, len)) == NULL) {
1129 rjt_data.reason = reason;
1130 rjt_data.explan = explan;
1131 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1133 sp = lport->tt.seq_start_next(sp);
1135 pp = fc_frame_payload_get(fp, len);
1138 pp->prli.prli_cmd = ELS_LS_ACC;
1139 pp->prli.prli_spp_len = plen;
1140 pp->prli.prli_len = htons(len);
1141 len -= sizeof(struct fc_els_prli);
1144 * Go through all the service parameter pages and build
1145 * response. If plen indicates longer SPP than standard,
1146 * use that. The entire response has been pre-cleared above.
1149 while (len >= plen) {
1150 spp->spp_type = rspp->spp_type;
1151 spp->spp_type_ext = rspp->spp_type_ext;
1152 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1153 resp = FC_SPP_RESP_ACK;
1154 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1155 resp = FC_SPP_RESP_NO_PA;
1156 switch (rspp->spp_type) {
1157 case 0: /* common to all FC-4 types */
1160 fcp_parm = ntohl(rspp->spp_params);
1161 if (fcp_parm * FCP_SPPF_RETRY)
1162 rdata->flags |= FC_RP_FLAGS_RETRY;
1163 rdata->supported_classes = FC_COS_CLASS3;
1164 if (fcp_parm & FCP_SPPF_INIT_FCN)
1165 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1166 if (fcp_parm & FCP_SPPF_TARG_FCN)
1167 roles |= FC_RPORT_ROLE_FCP_TARGET;
1168 rdata->ids.roles = roles;
1171 htonl(lport->service_params);
1174 resp = FC_SPP_RESP_INVL;
1177 spp->spp_flags |= resp;
1179 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1180 spp = (struct fc_els_spp *)((char *)spp + plen);
1184 * Send LS_ACC. If this fails, the originator should retry.
1186 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1187 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1188 ep = fc_seq_exch(sp);
1189 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1190 FC_TYPE_ELS, f_ctl, 0);
1191 lport->tt.seq_send(lport, sp, fp);
1194 * Get lock and re-check state.
1196 switch (rdata->rp_state) {
1198 fc_rport_enter_ready(rdata);
1200 case RPORT_ST_READY:
1206 fc_frame_free(rx_fp);
1210 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1211 * @rdata: private remote port data
1212 * @sp: current sequence in the PRLO exchange
1213 * @fp: PRLO request frame
1215 * Locking Note: The rport lock is exected to be held before calling
1218 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1220 struct fc_frame *fp)
1222 struct fc_lport *lport = rdata->local_port;
1224 struct fc_frame_header *fh;
1225 struct fc_seq_els_data rjt_data;
1227 fh = fc_frame_header_get(fp);
1229 FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1230 fc_rport_state(rdata));
1232 if (rdata->rp_state == RPORT_ST_DELETE) {
1238 rjt_data.reason = ELS_RJT_UNAB;
1239 rjt_data.explan = ELS_EXPL_NONE;
1240 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1245 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1246 * @rdata: private remote port data
1247 * @sp: current sequence in the LOGO exchange
1248 * @fp: LOGO request frame
1250 * Locking Note: The rport lock is exected to be held before calling
1253 static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata,
1255 struct fc_frame *fp)
1257 struct fc_frame_header *fh;
1258 struct fc_lport *lport = rdata->local_port;
1260 fh = fc_frame_header_get(fp);
1262 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1263 fc_rport_state(rdata));
1265 if (rdata->rp_state == RPORT_ST_DELETE) {
1270 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1272 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1276 static void fc_rport_flush_queue(void)
1278 flush_workqueue(rport_event_queue);
1281 int fc_rport_init(struct fc_lport *lport)
1283 if (!lport->tt.rport_create)
1284 lport->tt.rport_create = fc_rport_create;
1286 if (!lport->tt.rport_login)
1287 lport->tt.rport_login = fc_rport_login;
1289 if (!lport->tt.rport_logoff)
1290 lport->tt.rport_logoff = fc_rport_logoff;
1292 if (!lport->tt.rport_recv_req)
1293 lport->tt.rport_recv_req = fc_rport_recv_req;
1295 if (!lport->tt.rport_flush_queue)
1296 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1298 if (!lport->tt.rport_destroy)
1299 lport->tt.rport_destroy = fc_rport_destroy;
1303 EXPORT_SYMBOL(fc_rport_init);
1305 int fc_setup_rport(void)
1307 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1308 if (!rport_event_queue)
1312 EXPORT_SYMBOL(fc_setup_rport);
1314 void fc_destroy_rport(void)
1316 destroy_workqueue(rport_event_queue);
1318 EXPORT_SYMBOL(fc_destroy_rport);
1320 void fc_rport_terminate_io(struct fc_rport *rport)
1322 struct fc_rport_libfc_priv *rp = rport->dd_data;
1323 struct fc_lport *lport = rp->local_port;
1325 lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1326 lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1328 EXPORT_SYMBOL(fc_rport_terminate_io);