[SCSI] libfc: fix rport event race between READY and LOGO
[firefly-linux-kernel-4.4.55.git] drivers/scsi/libfc/fc_rport.c
1 /*
2  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19
20 /*
21  * RPORT GENERAL INFO
22  *
23  * This file contains all processing regarding fc_rports. It contains the
24  * rport state machine and does all rport interaction with the transport class.
25  * There should be no other places in libfc that interact directly with the
26  * transport class in regards to adding and deleting rports.
27  *
28  * fc_rports represent N_Ports within the fabric.
29  */
30
31 /*
32  * RPORT LOCKING
33  *
34  * The rport should never hold the rport mutex and then attempt to acquire
35  * either the lport or disc mutexes. The rport's mutex is considered lesser
36  * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37  * more comments on the hierarchy.
38  *
39  * The locking strategy is similar to the lport's strategy. The lock protects
40  * the rport's states and is held and released by the entry points to the rport
41  * block. All _enter_* functions correspond to rport states and expect the rport
42  * mutex to be locked before calling them. This means that rports only handle
43  * one request or response at a time. Since they are not critical to the I/O
44  * path, this potential over-use of the mutex is acceptable.
45  */
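/*
 * Illustration only -- the entry-point pattern described above, as used by
 * fc_rport_login() and the other entry points below: take the rport mutex,
 * call an _enter_* routine with the mutex held, then release the mutex.
 * The lport and disc mutexes must never be taken while rp_mutex is held.
 *
 *	mutex_lock(&rdata->rp_mutex);
 *	fc_rport_enter_plogi(rdata);
 *	mutex_unlock(&rdata->rp_mutex);
 */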
46
47 #include <linux/kernel.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/rcupdate.h>
51 #include <linux/timer.h>
52 #include <linux/workqueue.h>
53 #include <asm/unaligned.h>
54
55 #include <scsi/libfc.h>
56 #include <scsi/fc_encode.h>
57
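/*
 * Workqueue on which all rport event work (fc_rport_work) runs; created in
 * fc_setup_rport() and destroyed in fc_destroy_rport().
 */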
58 struct workqueue_struct *rport_event_queue;
59
60 static void fc_rport_enter_plogi(struct fc_rport_priv *);
61 static void fc_rport_enter_prli(struct fc_rport_priv *);
62 static void fc_rport_enter_rtv(struct fc_rport_priv *);
63 static void fc_rport_enter_ready(struct fc_rport_priv *);
64 static void fc_rport_enter_logo(struct fc_rport_priv *);
65
66 static void fc_rport_recv_plogi_req(struct fc_rport_priv *,
67                                     struct fc_seq *, struct fc_frame *);
68 static void fc_rport_recv_prli_req(struct fc_rport_priv *,
69                                    struct fc_seq *, struct fc_frame *);
70 static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
71                                    struct fc_seq *, struct fc_frame *);
72 static void fc_rport_recv_logo_req(struct fc_rport_priv *,
73                                    struct fc_seq *, struct fc_frame *);
74 static void fc_rport_timeout(struct work_struct *);
75 static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
76 static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
77 static void fc_rport_work(struct work_struct *);
78
79 static const char *fc_rport_state_names[] = {
80         [RPORT_ST_INIT] = "Init",
81         [RPORT_ST_PLOGI] = "PLOGI",
82         [RPORT_ST_PRLI] = "PRLI",
83         [RPORT_ST_RTV] = "RTV",
84         [RPORT_ST_READY] = "Ready",
85         [RPORT_ST_LOGO] = "LOGO",
86         [RPORT_ST_DELETE] = "Delete",
87 };
88
89 /**
90  * fc_rport_create() - create remote port in INIT state.
91  * @lport: local port.
92  * @ids: remote port identifiers.
93  *
94  * Locking note: this may be called without locks held, but
95  * is usually called from discovery with the disc_mutex held.
96  */
97 static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
98                                              struct fc_rport_identifiers *ids)
99 {
100         struct fc_rport_priv *rdata;
101
102         rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
103         if (!rdata)
104                 return NULL;
105
106         rdata->ids = *ids;
107         kref_init(&rdata->kref);
108         mutex_init(&rdata->rp_mutex);
109         rdata->local_port = lport;
110         rdata->rp_state = RPORT_ST_INIT;
111         rdata->event = RPORT_EV_NONE;
112         rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
113         rdata->e_d_tov = lport->e_d_tov;
114         rdata->r_a_tov = lport->r_a_tov;
115         rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
116         INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
117         INIT_WORK(&rdata->event_work, fc_rport_work);
118         return rdata;
119 }
120
121 /**
122  * fc_rport_destroy() - free a remote port after last reference is released.
123  * @kref: pointer to kref inside struct fc_rport_priv
124  */
125 static void fc_rport_destroy(struct kref *kref)
126 {
127         struct fc_rport_priv *rdata;
128
129         rdata = container_of(kref, struct fc_rport_priv, kref);
130         kfree(rdata);
131 }
132
133 /**
134  * fc_rport_state() - return a string for the state the rport is in
135  * @rdata: remote port private data
136  */
137 static const char *fc_rport_state(struct fc_rport_priv *rdata)
138 {
139         const char *cp;
140
141         cp = fc_rport_state_names[rdata->rp_state];
142         if (!cp)
143                 cp = "Unknown";
144         return cp;
145 }
146
147 /**
148  * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
149  * @rport: Pointer to Fibre Channel remote port structure
150  * @timeout: timeout in seconds
151  */
152 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
153 {
154         if (timeout)
155                 rport->dev_loss_tmo = timeout + 5;
156         else
157                 rport->dev_loss_tmo = 30;
158 }
159 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
160
161 /**
162  * fc_plogi_get_maxframe() - Get max payload from the common service parameters
163  * @flp: FLOGI payload structure
164  * @maxval: upper limit, may be less than what is in the service parameters
165  */
166 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
167                                           unsigned int maxval)
168 {
169         unsigned int mfs;
170
171         /*
172          * Get max payload from the common service parameters and the
173          * class 3 receive data field size.
174          */
175         mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
176         if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
177                 maxval = mfs;
178         mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
179         if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
180                 maxval = mfs;
181         return maxval;
182 }
183
184 /**
185  * fc_rport_state_enter() - Change the rport's state
186  * @rdata: The rport whose state should change
187  * @new: The new state of the rport
188  *
189  * Locking Note: Called with the rport lock held
190  */
191 static void fc_rport_state_enter(struct fc_rport_priv *rdata,
192                                  enum fc_rport_state new)
193 {
194         if (rdata->rp_state != new)
195                 rdata->retries = 0;
196         rdata->rp_state = new;
197 }
198
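/**
 * fc_rport_work() - Handler for remote port events on the rport_event_queue
 * @work: The work struct of the fc_rport_priv
 *
 * Handles the event recorded in rdata->event: for RPORT_EV_READY the
 * fc_rport is registered with the transport class and the event callback
 * is invoked; for FAILED, LOGO and STOP the callback is invoked, the retry
 * work is cancelled, outstanding exchanges with the port are reset, and the
 * fc_rport is removed from the transport class.
 */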
199 static void fc_rport_work(struct work_struct *work)
200 {
201         u32 port_id;
202         struct fc_rport_priv *rdata =
203                 container_of(work, struct fc_rport_priv, event_work);
204         struct fc_rport_libfc_priv *rp;
205         enum fc_rport_event event;
206         struct fc_lport *lport = rdata->local_port;
207         struct fc_rport_operations *rport_ops;
208         struct fc_rport_identifiers ids;
209         struct fc_rport *rport;
210
211         mutex_lock(&rdata->rp_mutex);
212         event = rdata->event;
213         rport_ops = rdata->ops;
214         rport = rdata->rport;
215
216         FC_RPORT_DBG(rdata, "work event %u\n", event);
217
218         switch (event) {
219         case RPORT_EV_READY:
220                 ids = rdata->ids;
221                 rdata->event = RPORT_EV_NONE;
222                 kref_get(&rdata->kref);
223                 mutex_unlock(&rdata->rp_mutex);
224
225                 if (!rport)
226                         rport = fc_remote_port_add(lport->host, 0, &ids);
227                 if (!rport) {
228                         FC_RPORT_DBG(rdata, "Failed to add the rport\n");
229                         lport->tt.rport_logoff(rdata);
230                         kref_put(&rdata->kref, lport->tt.rport_destroy);
231                         return;
232                 }
233                 mutex_lock(&rdata->rp_mutex);
234                 if (rdata->rport)
235                         FC_RPORT_DBG(rdata, "rport already allocated\n");
236                 rdata->rport = rport;
237                 rport->maxframe_size = rdata->maxframe_size;
238                 rport->supported_classes = rdata->supported_classes;
239
240                 rp = rport->dd_data;
241                 rp->local_port = lport;
242                 rp->rp_state = rdata->rp_state;
243                 rp->flags = rdata->flags;
244                 rp->e_d_tov = rdata->e_d_tov;
245                 rp->r_a_tov = rdata->r_a_tov;
246                 mutex_unlock(&rdata->rp_mutex);
247
248                 if (rport_ops->event_callback) {
249                         FC_RPORT_DBG(rdata, "callback ev %d\n", event);
250                         rport_ops->event_callback(lport, rdata, event);
251                 }
252                 kref_put(&rdata->kref, lport->tt.rport_destroy);
253                 break;
254
255         case RPORT_EV_FAILED:
256         case RPORT_EV_LOGO:
257         case RPORT_EV_STOP:
258                 port_id = rdata->ids.port_id;
259                 mutex_unlock(&rdata->rp_mutex);
260
261                 if (rport_ops->event_callback) {
262                         FC_RPORT_DBG(rdata, "callback ev %d\n", event);
263                         rport_ops->event_callback(lport, rdata, event);
264                 }
265                 cancel_delayed_work_sync(&rdata->retry_work);
266
267                 /*
268                  * Reset any outstanding exchanges before freeing rport.
269                  */
270                 lport->tt.exch_mgr_reset(lport, 0, port_id);
271                 lport->tt.exch_mgr_reset(lport, port_id, 0);
272
273                 if (rport) {
274                         rp = rport->dd_data;
275                         rp->rp_state = RPORT_ST_DELETE;
276                         mutex_lock(&rdata->rp_mutex);
277                         rdata->rport = NULL;
278                         mutex_unlock(&rdata->rp_mutex);
279                         fc_remote_port_delete(rport);
280                 }
281                 kref_put(&rdata->kref, lport->tt.rport_destroy);
282                 break;
283
284         default:
285                 mutex_unlock(&rdata->rp_mutex);
286                 break;
287         }
288 }
289
290 /**
291  * fc_rport_login() - Start the remote port login state machine
292  * @rdata: private remote port
293  *
294  * Locking Note: Called without the rport lock held. This
295  * function will hold the rport lock, call an _enter_*
296  * function and then unlock the rport.
297  */
298 int fc_rport_login(struct fc_rport_priv *rdata)
299 {
300         mutex_lock(&rdata->rp_mutex);
301
302         FC_RPORT_DBG(rdata, "Login to port\n");
303
304         fc_rport_enter_plogi(rdata);
305
306         mutex_unlock(&rdata->rp_mutex);
307
308         return 0;
309 }
310
311 /**
312  * fc_rport_enter_delete() - schedule a remote port to be deleted.
313  * @rdata: private remote port
314  * @event: event to report as the reason for deletion
315  *
316  * Locking Note: Called with the rport lock held.
317  *
318  * Allow state change into DELETE only once.
319  *
320  * Call queue_work only if there's no event already pending.
321  * Set the new event so that the old pending event will not occur.
322  * Since we have the mutex, even if fc_rport_work() is already started,
323  * it'll see the new event.
324  */
325 static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
326                                   enum fc_rport_event event)
327 {
328         if (rdata->rp_state == RPORT_ST_DELETE)
329                 return;
330
331         FC_RPORT_DBG(rdata, "Delete port\n");
332
333         fc_rport_state_enter(rdata, RPORT_ST_DELETE);
334
335         if (rdata->event == RPORT_EV_NONE)
336                 queue_work(rport_event_queue, &rdata->event_work);
337         rdata->event = event;
338 }
339
340 /**
341  * fc_rport_logoff() - Logoff and remove an rport
342  * @rdata: private remote port
343  *
344  * Locking Note: Called without the rport lock held. This
345  * function will hold the rport lock, call an _enter_*
346  * function and then unlock the rport.
347  */
348 int fc_rport_logoff(struct fc_rport_priv *rdata)
349 {
350         mutex_lock(&rdata->rp_mutex);
351
352         FC_RPORT_DBG(rdata, "Remove port\n");
353
354         if (rdata->rp_state == RPORT_ST_DELETE) {
355                 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
356                 mutex_unlock(&rdata->rp_mutex);
357                 goto out;
358         }
359
360         fc_rport_enter_logo(rdata);
361
362         /*
363          * Change the state to Delete so that we discard
364          * the response.
365          */
366         fc_rport_enter_delete(rdata, RPORT_EV_STOP);
367         mutex_unlock(&rdata->rp_mutex);
368
369 out:
370         return 0;
371 }
372
373 /**
374  * fc_rport_enter_ready() - The rport is ready
375  * @rdata: private remote port
376  *
377  * Locking Note: The rport lock is expected to be held before calling
378  * this routine.
379  */
380 static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
381 {
382         fc_rport_state_enter(rdata, RPORT_ST_READY);
383
384         FC_RPORT_DBG(rdata, "Port is Ready\n");
385
386         if (rdata->event == RPORT_EV_NONE)
387                 queue_work(rport_event_queue, &rdata->event_work);
388         rdata->event = RPORT_EV_READY;
389 }
390
391 /**
392  * fc_rport_timeout() - Handler for the retry_work timer.
393  * @work: The work struct of the fc_rport_priv
394  *
395  * Locking Note: Called without the rport lock held. This
396  * function will hold the rport lock, call an _enter_*
397  * function and then unlock the rport.
398  */
399 static void fc_rport_timeout(struct work_struct *work)
400 {
401         struct fc_rport_priv *rdata =
402                 container_of(work, struct fc_rport_priv, retry_work.work);
403
404         mutex_lock(&rdata->rp_mutex);
405
406         switch (rdata->rp_state) {
407         case RPORT_ST_PLOGI:
408                 fc_rport_enter_plogi(rdata);
409                 break;
410         case RPORT_ST_PRLI:
411                 fc_rport_enter_prli(rdata);
412                 break;
413         case RPORT_ST_RTV:
414                 fc_rport_enter_rtv(rdata);
415                 break;
416         case RPORT_ST_LOGO:
417                 fc_rport_enter_logo(rdata);
418                 break;
419         case RPORT_ST_READY:
420         case RPORT_ST_INIT:
421         case RPORT_ST_DELETE:
422                 break;
423         }
424
425         mutex_unlock(&rdata->rp_mutex);
426 }
427
428 /**
429  * fc_rport_error() - Error handler, called once retries have been exhausted
430  * @rdata: private remote port
431  * @fp: The frame pointer
432  *
433  * Locking Note: The rport lock is expected to be held before
434  * calling this routine
435  */
436 static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
437 {
438         FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
439                      PTR_ERR(fp), fc_rport_state(rdata), rdata->retries);
440
441         switch (rdata->rp_state) {
442         case RPORT_ST_PLOGI:
443         case RPORT_ST_PRLI:
444         case RPORT_ST_LOGO:
445                 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
446                 break;
447         case RPORT_ST_RTV:
448                 fc_rport_enter_ready(rdata);
449                 break;
450         case RPORT_ST_DELETE:
451         case RPORT_ST_READY:
452         case RPORT_ST_INIT:
453                 break;
454         }
455 }
456
457 /**
458  * fc_rport_error_retry() - Error handler when retries are desired
459  * @rdata: private remote port data
460  * @fp: The frame pointer
461  *
462  * If the error was an exchange timeout retry immediately,
463  * otherwise wait for E_D_TOV.
464  *
465  * Locking Note: The rport lock is expected to be held before
466  * calling this routine
467  */
468 static void fc_rport_error_retry(struct fc_rport_priv *rdata,
469                                  struct fc_frame *fp)
470 {
471         unsigned long delay = FC_DEF_E_D_TOV;
472
473         /* make sure this isn't an FC_EX_CLOSED error, never retry those */
474         if (PTR_ERR(fp) == -FC_EX_CLOSED)
475                 return fc_rport_error(rdata, fp);
476
477         if (rdata->retries < rdata->local_port->max_rport_retry_count) {
478                 FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
479                              PTR_ERR(fp), fc_rport_state(rdata));
480                 rdata->retries++;
481                 /* no additional delay on exchange timeouts */
482                 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
483                         delay = 0;
484                 schedule_delayed_work(&rdata->retry_work, delay);
485                 return;
486         }
487
488         return fc_rport_error(rdata, fp);
489 }
490
491 /**
492  * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
493  * @sp: current sequence in the PLOGI exchange
494  * @fp: response frame
495  * @rdata_arg: private remote port data
496  *
497  * Locking Note: This function will be called without the rport lock
498  * held, but it will lock, call an _enter_* function or fc_rport_error
499  * and then unlock the rport.
500  */
501 static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
502                                 void *rdata_arg)
503 {
504         struct fc_rport_priv *rdata = rdata_arg;
505         struct fc_lport *lport = rdata->local_port;
506         struct fc_els_flogi *plp = NULL;
507         unsigned int tov;
508         u16 csp_seq;
509         u16 cssp_seq;
510         u8 op;
511
512         mutex_lock(&rdata->rp_mutex);
513
514         FC_RPORT_DBG(rdata, "Received a PLOGI response\n");
515
516         if (rdata->rp_state != RPORT_ST_PLOGI) {
517                 FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
518                              "%s\n", fc_rport_state(rdata));
519                 if (IS_ERR(fp))
520                         goto err;
521                 goto out;
522         }
523
524         if (IS_ERR(fp)) {
525                 fc_rport_error_retry(rdata, fp);
526                 goto err;
527         }
528
529         op = fc_frame_payload_op(fp);
530         if (op == ELS_LS_ACC &&
531             (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
532                 rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
533                 rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
534
535                 tov = ntohl(plp->fl_csp.sp_e_d_tov);
536                 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
537                         tov /= 1000;
538                 if (tov > rdata->e_d_tov)
539                         rdata->e_d_tov = tov;
540                 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
541                 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
542                 if (cssp_seq < csp_seq)
543                         csp_seq = cssp_seq;
544                 rdata->max_seq = csp_seq;
545                 rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
546
547                 /*
548                  * If the rport is one of the well known addresses
549                  * we skip PRLI and RTV and go straight to READY.
550                  */
551                 if (rdata->ids.port_id >= FC_FID_DOM_MGR)
552                         fc_rport_enter_ready(rdata);
553                 else
554                         fc_rport_enter_prli(rdata);
555         } else
556                 fc_rport_error_retry(rdata, fp);
557
558 out:
559         fc_frame_free(fp);
560 err:
561         mutex_unlock(&rdata->rp_mutex);
562         kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
563 }
564
565 /**
566  * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
567  * @rdata: private remote port data
568  *
569  * Locking Note: The rport lock is expected to be held before calling
570  * this routine.
571  */
572 static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
573 {
574         struct fc_lport *lport = rdata->local_port;
575         struct fc_frame *fp;
576
577         FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
578                      fc_rport_state(rdata));
579
580         fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
581
582         rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
583         fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
584         if (!fp) {
585                 fc_rport_error_retry(rdata, fp);
586                 return;
587         }
588         rdata->e_d_tov = lport->e_d_tov;
589
590         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
591                                   fc_rport_plogi_resp, rdata, lport->e_d_tov))
592                 fc_rport_error_retry(rdata, fp);
593         else
594                 kref_get(&rdata->kref);
595 }
596
597 /**
598  * fc_rport_prli_resp() - Process Login (PRLI) response handler
599  * @sp: current sequence in the PRLI exchange
600  * @fp: response frame
601  * @rdata_arg: private remote port data
602  *
603  * Locking Note: This function will be called without the rport lock
604  * held, but it will lock, call an _enter_* function or fc_rport_error
605  * and then unlock the rport.
606  */
607 static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
608                                void *rdata_arg)
609 {
610         struct fc_rport_priv *rdata = rdata_arg;
611         struct {
612                 struct fc_els_prli prli;
613                 struct fc_els_spp spp;
614         } *pp;
615         u32 roles = FC_RPORT_ROLE_UNKNOWN;
616         u32 fcp_parm = 0;
617         u8 op;
618
619         mutex_lock(&rdata->rp_mutex);
620
621         FC_RPORT_DBG(rdata, "Received a PRLI response\n");
622
623         if (rdata->rp_state != RPORT_ST_PRLI) {
624                 FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
625                              "%s\n", fc_rport_state(rdata));
626                 if (IS_ERR(fp))
627                         goto err;
628                 goto out;
629         }
630
631         if (IS_ERR(fp)) {
632                 fc_rport_error_retry(rdata, fp);
633                 goto err;
634         }
635
636         op = fc_frame_payload_op(fp);
637         if (op == ELS_LS_ACC) {
638                 pp = fc_frame_payload_get(fp, sizeof(*pp));
639                 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
640                         fcp_parm = ntohl(pp->spp.spp_params);
641                         if (fcp_parm & FCP_SPPF_RETRY)
642                                 rdata->flags |= FC_RP_FLAGS_RETRY;
643                 }
644
645                 rdata->supported_classes = FC_COS_CLASS3;
646                 if (fcp_parm & FCP_SPPF_INIT_FCN)
647                         roles |= FC_RPORT_ROLE_FCP_INITIATOR;
648                 if (fcp_parm & FCP_SPPF_TARG_FCN)
649                         roles |= FC_RPORT_ROLE_FCP_TARGET;
650
651                 rdata->ids.roles = roles;
652                 fc_rport_enter_rtv(rdata);
653
654         } else {
655                 FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
656                 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
657         }
658
659 out:
660         fc_frame_free(fp);
661 err:
662         mutex_unlock(&rdata->rp_mutex);
663         kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
664 }
665
666 /**
667  * fc_rport_logo_resp() - Logout (LOGO) response handler
668  * @sp: current sequence in the LOGO exchange
669  * @fp: response frame
670  * @rdata_arg: private remote port data
671  *
672  * Locking Note: This function will be called without the rport lock
673  * held, but it will lock, call an _enter_* function or fc_rport_error
674  * and then unlock the rport.
675  */
676 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
677                                void *rdata_arg)
678 {
679         struct fc_rport_priv *rdata = rdata_arg;
680         u8 op;
681
682         mutex_lock(&rdata->rp_mutex);
683
684         FC_RPORT_DBG(rdata, "Received a LOGO response\n");
685
686         if (rdata->rp_state != RPORT_ST_LOGO) {
687                 FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
688                              "%s\n", fc_rport_state(rdata));
689                 if (IS_ERR(fp))
690                         goto err;
691                 goto out;
692         }
693
694         if (IS_ERR(fp)) {
695                 fc_rport_error_retry(rdata, fp);
696                 goto err;
697         }
698
699         op = fc_frame_payload_op(fp);
700         if (op == ELS_LS_ACC) {
701                 fc_rport_enter_rtv(rdata);
702         } else {
703                 FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n");
704                 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
705         }
706
707 out:
708         fc_frame_free(fp);
709 err:
710         mutex_unlock(&rdata->rp_mutex);
711         kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
712 }
713
714 /**
715  * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
716  * @rdata: private remote port data
717  *
718  * Locking Note: The rport lock is expected to be held before calling
719  * this routine.
720  */
721 static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
722 {
723         struct fc_lport *lport = rdata->local_port;
724         struct {
725                 struct fc_els_prli prli;
726                 struct fc_els_spp spp;
727         } *pp;
728         struct fc_frame *fp;
729
730         FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
731                      fc_rport_state(rdata));
732
733         fc_rport_state_enter(rdata, RPORT_ST_PRLI);
734
735         fp = fc_frame_alloc(lport, sizeof(*pp));
736         if (!fp) {
737                 fc_rport_error_retry(rdata, fp);
738                 return;
739         }
740
741         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
742                                   fc_rport_prli_resp, rdata, lport->e_d_tov))
743                 fc_rport_error_retry(rdata, fp);
744         else
745                 kref_get(&rdata->kref);
746 }
747
748 /**
749  * fc_rport_rtv_resp() - Request Timeout Value (RTV) response handler
750  * @sp: current sequence in the RTV exchange
751  * @fp: response frame
752  * @rdata_arg: private remote port data
753  *
754  * Many targets don't seem to support this.
755  *
756  * Locking Note: This function will be called without the rport lock
757  * held, but it will lock, call an _enter_* function or fc_rport_error
758  * and then unlock the rport.
759  */
760 static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
761                               void *rdata_arg)
762 {
763         struct fc_rport_priv *rdata = rdata_arg;
764         u8 op;
765
766         mutex_lock(&rdata->rp_mutex);
767
768         FC_RPORT_DBG(rdata, "Received a RTV response\n");
769
770         if (rdata->rp_state != RPORT_ST_RTV) {
771                 FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
772                              "%s\n", fc_rport_state(rdata));
773                 if (IS_ERR(fp))
774                         goto err;
775                 goto out;
776         }
777
778         if (IS_ERR(fp)) {
779                 fc_rport_error(rdata, fp);
780                 goto err;
781         }
782
783         op = fc_frame_payload_op(fp);
784         if (op == ELS_LS_ACC) {
785                 struct fc_els_rtv_acc *rtv;
786                 u32 toq;
787                 u32 tov;
788
789                 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
790                 if (rtv) {
791                         toq = ntohl(rtv->rtv_toq);
792                         tov = ntohl(rtv->rtv_r_a_tov);
793                         if (tov == 0)
794                                 tov = 1;
795                         rdata->r_a_tov = tov;
796                         tov = ntohl(rtv->rtv_e_d_tov);
797                         if (toq & FC_ELS_RTV_EDRES)
798                                 tov /= 1000000;
799                         if (tov == 0)
800                                 tov = 1;
801                         rdata->e_d_tov = tov;
802                 }
803         }
804
805         fc_rport_enter_ready(rdata);
806
807 out:
808         fc_frame_free(fp);
809 err:
810         mutex_unlock(&rdata->rp_mutex);
811         kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
812 }
813
814 /**
815  * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
816  * @rdata: private remote port data
817  *
818  * Locking Note: The rport lock is expected to be held before calling
819  * this routine.
820  */
821 static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
822 {
823         struct fc_frame *fp;
824         struct fc_lport *lport = rdata->local_port;
825
826         FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
827                      fc_rport_state(rdata));
828
829         fc_rport_state_enter(rdata, RPORT_ST_RTV);
830
831         fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
832         if (!fp) {
833                 fc_rport_error_retry(rdata, fp);
834                 return;
835         }
836
837         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
838                                      fc_rport_rtv_resp, rdata, lport->e_d_tov))
839                 fc_rport_error_retry(rdata, fp);
840         else
841                 kref_get(&rdata->kref);
842 }
843
844 /**
845  * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
846  * @rdata: private remote port data
847  *
848  * Locking Note: The rport lock is expected to be held before calling
849  * this routine.
850  */
851 static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
852 {
853         struct fc_lport *lport = rdata->local_port;
854         struct fc_frame *fp;
855
856         FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
857                      fc_rport_state(rdata));
858
859         fc_rport_state_enter(rdata, RPORT_ST_LOGO);
860
861         fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
862         if (!fp) {
863                 fc_rport_error_retry(rdata, fp);
864                 return;
865         }
866
867         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
868                                   fc_rport_logo_resp, rdata, lport->e_d_tov))
869                 fc_rport_error_retry(rdata, fp);
870         else
871                 kref_get(&rdata->kref);
872 }
873
874
875 /**
876  * fc_rport_recv_req() - Receive a request from a rport
877  * @sp: current sequence in the exchange
878  * @fp: request frame
879  * @rdata: private remote port data
880  *
881  * Locking Note: Called without the rport lock held. This
882  * function will hold the rport lock, call an _enter_*
883  * function and then unlock the rport.
884  */
885 void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
886                        struct fc_rport_priv *rdata)
887 {
888         struct fc_lport *lport = rdata->local_port;
889
890         struct fc_frame_header *fh;
891         struct fc_seq_els_data els_data;
892         u8 op;
893
894         mutex_lock(&rdata->rp_mutex);
895
896         els_data.fp = NULL;
897         els_data.explan = ELS_EXPL_NONE;
898         els_data.reason = ELS_RJT_NONE;
899
900         fh = fc_frame_header_get(fp);
901
902         if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
903                 op = fc_frame_payload_op(fp);
904                 switch (op) {
905                 case ELS_PLOGI:
906                         fc_rport_recv_plogi_req(rdata, sp, fp);
907                         break;
908                 case ELS_PRLI:
909                         fc_rport_recv_prli_req(rdata, sp, fp);
910                         break;
911                 case ELS_PRLO:
912                         fc_rport_recv_prlo_req(rdata, sp, fp);
913                         break;
914                 case ELS_LOGO:
915                         fc_rport_recv_logo_req(rdata, sp, fp);
916                         break;
917                 case ELS_RRQ:
918                         els_data.fp = fp;
919                         lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
920                         break;
921                 case ELS_REC:
922                         els_data.fp = fp;
923                         lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
924                         break;
925                 default:
926                         els_data.reason = ELS_RJT_UNSUP;
927                         lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
928                         break;
929                 }
930         }
931
932         mutex_unlock(&rdata->rp_mutex);
933 }
934
935 /**
936  * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
937  * @rdata: private remote port data
938  * @sp: current sequence in the PLOGI exchange
939  * @rx_fp: PLOGI request frame
940  *
941  * Locking Note: The rport lock is expected to be held before calling
942  * this function.
943  */
944 static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata,
945                                     struct fc_seq *sp, struct fc_frame *rx_fp)
946 {
947         struct fc_lport *lport = rdata->local_port;
948         struct fc_frame *fp = rx_fp;
949         struct fc_exch *ep;
950         struct fc_frame_header *fh;
951         struct fc_els_flogi *pl;
952         struct fc_seq_els_data rjt_data;
953         u32 sid;
954         u64 wwpn;
955         u64 wwnn;
956         enum fc_els_rjt_reason reject = 0;
957         u32 f_ctl;
958         rjt_data.fp = NULL;
959
960         fh = fc_frame_header_get(fp);
961
962         FC_RPORT_DBG(rdata, "Received PLOGI request while in state %s\n",
963                      fc_rport_state(rdata));
964
965         sid = ntoh24(fh->fh_s_id);
966         pl = fc_frame_payload_get(fp, sizeof(*pl));
967         if (!pl) {
968                 FC_RPORT_DBG(rdata, "Received PLOGI too short\n");
969                 WARN_ON(1);
970                 /* XXX TBD: send reject? */
971                 fc_frame_free(fp);
972                 return;
973         }
974         wwpn = get_unaligned_be64(&pl->fl_wwpn);
975         wwnn = get_unaligned_be64(&pl->fl_wwnn);
976
977         /*
978          * If the session was just created, possibly due to the incoming PLOGI,
979          * set the state appropriately and accept the PLOGI.
980          *
981          * If we had also sent a PLOGI, and if the received PLOGI is from a
982          * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
983          * "command already in progress".
984          *
985          * XXX TBD: If the session was ready before, the PLOGI should result in
986          * all outstanding exchanges being reset.
987          */
988         switch (rdata->rp_state) {
989         case RPORT_ST_INIT:
990                 FC_RPORT_DBG(rdata, "Received PLOGI, wwpn %llx state INIT "
991                              "- reject\n", (unsigned long long)wwpn);
992                 reject = ELS_RJT_UNSUP;
993                 break;
994         case RPORT_ST_PLOGI:
995                 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state %d\n",
996                              rdata->rp_state);
997                 if (wwpn < lport->wwpn)
998                         reject = ELS_RJT_INPROG;
999                 break;
1000         case RPORT_ST_PRLI:
1001         case RPORT_ST_READY:
1002                 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
1003                              "- ignored for now\n", rdata->rp_state);
1004                 /* XXX TBD - should reset */
1005                 break;
1006         case RPORT_ST_DELETE:
1007         default:
1008                 FC_RPORT_DBG(rdata, "Received PLOGI in unexpected "
1009                              "state %d\n", rdata->rp_state);
1010                 fc_frame_free(fp);
1011                 return;
1012                 break;
1013         }
1014
1015         if (reject) {
1016                 rjt_data.reason = reject;
1017                 rjt_data.explan = ELS_EXPL_NONE;
1018                 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1019                 fc_frame_free(fp);
1020         } else {
1021                 fp = fc_frame_alloc(lport, sizeof(*pl));
1022                 if (fp == NULL) {
1023                         fp = rx_fp;
1024                         rjt_data.reason = ELS_RJT_UNAB;
1025                         rjt_data.explan = ELS_EXPL_NONE;
1026                         lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1027                         fc_frame_free(fp);
1028                 } else {
1029                         sp = lport->tt.seq_start_next(sp);
1030                         WARN_ON(!sp);
1031                         rdata->ids.port_name = wwpn;
1032                         rdata->ids.node_name = wwnn;
1033
1034                         /*
1035                          * Get session payload size from incoming PLOGI.
1036                          */
1037                         rdata->maxframe_size =
1038                                 fc_plogi_get_maxframe(pl, lport->mfs);
1039                         fc_frame_free(rx_fp);
1040                         fc_plogi_fill(lport, fp, ELS_LS_ACC);
1041
1042                         /*
1043                          * Send LS_ACC.  If this fails,
1044                          * the originator should retry.
1045                          */
1046                         f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1047                         f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1048                         ep = fc_seq_exch(sp);
1049                         fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1050                                        FC_TYPE_ELS, f_ctl, 0);
1051                         lport->tt.seq_send(lport, sp, fp);
1052                         if (rdata->rp_state == RPORT_ST_PLOGI)
1053                                 fc_rport_enter_prli(rdata);
1054                 }
1055         }
1056 }
1057
1058 /**
1059  * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1060  * @rdata: private remote port data
1061  * @sp: current sequence in the PRLI exchange
1062  * @rx_fp: PRLI request frame
1063  *
1064  * Locking Note: The rport lock is expected to be held before calling
1065  * this function.
1066  */
1067 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1068                                    struct fc_seq *sp, struct fc_frame *rx_fp)
1069 {
1070         struct fc_lport *lport = rdata->local_port;
1071         struct fc_exch *ep;
1072         struct fc_frame *fp;
1073         struct fc_frame_header *fh;
1074         struct {
1075                 struct fc_els_prli prli;
1076                 struct fc_els_spp spp;
1077         } *pp;
1078         struct fc_els_spp *rspp;        /* request service param page */
1079         struct fc_els_spp *spp; /* response spp */
1080         unsigned int len;
1081         unsigned int plen;
1082         enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1083         enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1084         enum fc_els_spp_resp resp;
1085         struct fc_seq_els_data rjt_data;
1086         u32 f_ctl;
1087         u32 fcp_parm;
1088         u32 roles = FC_RPORT_ROLE_UNKNOWN;
1089         rjt_data.fp = NULL;
1090
1091         fh = fc_frame_header_get(rx_fp);
1092
1093         FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1094                      fc_rport_state(rdata));
1095
1096         switch (rdata->rp_state) {
1097         case RPORT_ST_PRLI:
1098         case RPORT_ST_READY:
1099                 reason = ELS_RJT_NONE;
1100                 break;
1101         default:
1102                 fc_frame_free(rx_fp);
1103                 return;
1104                 break;
1105         }
1106         len = fr_len(rx_fp) - sizeof(*fh);
1107         pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1108         if (pp == NULL) {
1109                 reason = ELS_RJT_PROT;
1110                 explan = ELS_EXPL_INV_LEN;
1111         } else {
1112                 plen = ntohs(pp->prli.prli_len);
1113                 if ((plen % 4) != 0 || plen > len) {
1114                         reason = ELS_RJT_PROT;
1115                         explan = ELS_EXPL_INV_LEN;
1116                 } else if (plen < len) {
1117                         len = plen;
1118                 }
1119                 plen = pp->prli.prli_spp_len;
1120                 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1121                     plen > len || len < sizeof(*pp)) {
1122                         reason = ELS_RJT_PROT;
1123                         explan = ELS_EXPL_INV_LEN;
1124                 }
1125                 rspp = &pp->spp;
1126         }
1127         if (reason != ELS_RJT_NONE ||
1128             (fp = fc_frame_alloc(lport, len)) == NULL) {
1129                 rjt_data.reason = reason;
1130                 rjt_data.explan = explan;
1131                 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1132         } else {
1133                 sp = lport->tt.seq_start_next(sp);
1134                 WARN_ON(!sp);
1135                 pp = fc_frame_payload_get(fp, len);
1136                 WARN_ON(!pp);
1137                 memset(pp, 0, len);
1138                 pp->prli.prli_cmd = ELS_LS_ACC;
1139                 pp->prli.prli_spp_len = plen;
1140                 pp->prli.prli_len = htons(len);
1141                 len -= sizeof(struct fc_els_prli);
1142
1143                 /*
1144                  * Go through all the service parameter pages and build
1145                  * response.  If plen indicates longer SPP than standard,
1146                  * use that.  The entire response has been pre-cleared above.
1147                  */
1148                 spp = &pp->spp;
1149                 while (len >= plen) {
1150                         spp->spp_type = rspp->spp_type;
1151                         spp->spp_type_ext = rspp->spp_type_ext;
1152                         spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1153                         resp = FC_SPP_RESP_ACK;
1154                         if (rspp->spp_flags & FC_SPP_RPA_VAL)
1155                                 resp = FC_SPP_RESP_NO_PA;
1156                         switch (rspp->spp_type) {
1157                         case 0: /* common to all FC-4 types */
1158                                 break;
1159                         case FC_TYPE_FCP:
1160                                 fcp_parm = ntohl(rspp->spp_params);
1161                                 if (fcp_parm & FCP_SPPF_RETRY)
1162                                         rdata->flags |= FC_RP_FLAGS_RETRY;
1163                                 rdata->supported_classes = FC_COS_CLASS3;
1164                                 if (fcp_parm & FCP_SPPF_INIT_FCN)
1165                                         roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1166                                 if (fcp_parm & FCP_SPPF_TARG_FCN)
1167                                         roles |= FC_RPORT_ROLE_FCP_TARGET;
1168                                 rdata->ids.roles = roles;
1169
1170                                 spp->spp_params =
1171                                         htonl(lport->service_params);
1172                                 break;
1173                         default:
1174                                 resp = FC_SPP_RESP_INVL;
1175                                 break;
1176                         }
1177                         spp->spp_flags |= resp;
1178                         len -= plen;
1179                         rspp = (struct fc_els_spp *)((char *)rspp + plen);
1180                         spp = (struct fc_els_spp *)((char *)spp + plen);
1181                 }
1182
1183                 /*
1184                  * Send LS_ACC.  If this fails, the originator should retry.
1185                  */
1186                 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1187                 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1188                 ep = fc_seq_exch(sp);
1189                 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1190                                FC_TYPE_ELS, f_ctl, 0);
1191                 lport->tt.seq_send(lport, sp, fp);
1192
1193                 /*
1194                  * Re-check the rport state (the lock is still held).
1195                  */
1196                 switch (rdata->rp_state) {
1197                 case RPORT_ST_PRLI:
1198                         fc_rport_enter_ready(rdata);
1199                         break;
1200                 case RPORT_ST_READY:
1201                         break;
1202                 default:
1203                         break;
1204                 }
1205         }
1206         fc_frame_free(rx_fp);
1207 }
1208
1209 /**
1210  * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1211  * @rdata: private remote port data
1212  * @sp: current sequence in the PRLO exchange
1213  * @fp: PRLO request frame
1214  *
1215  * Locking Note: The rport lock is expected to be held before calling
1216  * this function.
1217  */
1218 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1219                                    struct fc_seq *sp,
1220                                    struct fc_frame *fp)
1221 {
1222         struct fc_lport *lport = rdata->local_port;
1223
1224         struct fc_frame_header *fh;
1225         struct fc_seq_els_data rjt_data;
1226
1227         fh = fc_frame_header_get(fp);
1228
1229         FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1230                      fc_rport_state(rdata));
1231
1232         if (rdata->rp_state == RPORT_ST_DELETE) {
1233                 fc_frame_free(fp);
1234                 return;
1235         }
1236
1237         rjt_data.fp = NULL;
1238         rjt_data.reason = ELS_RJT_UNAB;
1239         rjt_data.explan = ELS_EXPL_NONE;
1240         lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1241         fc_frame_free(fp);
1242 }
1243
1244 /**
1245  * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1246  * @rdata: private remote port data
1247  * @sp: current sequence in the LOGO exchange
1248  * @fp: LOGO request frame
1249  *
1250  * Locking Note: The rport lock is expected to be held before calling
1251  * this function.
1252  */
1253 static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata,
1254                                    struct fc_seq *sp,
1255                                    struct fc_frame *fp)
1256 {
1257         struct fc_frame_header *fh;
1258         struct fc_lport *lport = rdata->local_port;
1259
1260         fh = fc_frame_header_get(fp);
1261
1262         FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1263                      fc_rport_state(rdata));
1264
1265         if (rdata->rp_state == RPORT_ST_DELETE) {
1266                 fc_frame_free(fp);
1267                 return;
1268         }
1269
1270         fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1271
1272         lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1273         fc_frame_free(fp);
1274 }
1275
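/**
 * fc_rport_flush_queue() - Flush the rport event workqueue
 *
 * Waits until all pending fc_rport_work items have completed.
 */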
1276 static void fc_rport_flush_queue(void)
1277 {
1278         flush_workqueue(rport_event_queue);
1279 }
1280
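/**
 * fc_rport_init() - Fill in rport template functions for a local port
 * @lport: local port to initialize
 *
 * Sets any rport-related transport template entries that the caller has
 * not already provided.
 */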
1281 int fc_rport_init(struct fc_lport *lport)
1282 {
1283         if (!lport->tt.rport_create)
1284                 lport->tt.rport_create = fc_rport_create;
1285
1286         if (!lport->tt.rport_login)
1287                 lport->tt.rport_login = fc_rport_login;
1288
1289         if (!lport->tt.rport_logoff)
1290                 lport->tt.rport_logoff = fc_rport_logoff;
1291
1292         if (!lport->tt.rport_recv_req)
1293                 lport->tt.rport_recv_req = fc_rport_recv_req;
1294
1295         if (!lport->tt.rport_flush_queue)
1296                 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1297
1298         if (!lport->tt.rport_destroy)
1299                 lport->tt.rport_destroy = fc_rport_destroy;
1300
1301         return 0;
1302 }
1303 EXPORT_SYMBOL(fc_rport_init);
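/*
 * Illustration only -- a hypothetical caller of the template functions set
 * up by fc_rport_init().  Discovery code, for example, creates a remote
 * port and starts its login roughly like this; "my_rport_ops" is a
 * placeholder for the caller's fc_rport_operations:
 *
 *	rdata = lport->tt.rport_create(lport, &ids);
 *	if (rdata) {
 *		rdata->ops = &my_rport_ops;
 *		lport->tt.rport_login(rdata);
 *	}
 */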
1304
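/**
 * fc_setup_rport() - Create the global rport event workqueue
 *
 * Returns 0 on success or -ENOMEM if the workqueue cannot be created.
 */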
1305 int fc_setup_rport(void)
1306 {
1307         rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1308         if (!rport_event_queue)
1309                 return -ENOMEM;
1310         return 0;
1311 }
1312 EXPORT_SYMBOL(fc_setup_rport);
1313
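/**
 * fc_destroy_rport() - Destroy the global rport event workqueue
 */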
1314 void fc_destroy_rport(void)
1315 {
1316         destroy_workqueue(rport_event_queue);
1317 }
1318 EXPORT_SYMBOL(fc_destroy_rport);
1319
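/**
 * fc_rport_terminate_io() - Stop all outstanding I/O for a remote port
 * @rport: Fibre Channel remote port
 *
 * Resets every exchange to or from the remote port's FC_ID.
 */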
1320 void fc_rport_terminate_io(struct fc_rport *rport)
1321 {
1322         struct fc_rport_libfc_priv *rp = rport->dd_data;
1323         struct fc_lport *lport = rp->local_port;
1324
1325         lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1326         lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1327 }
1328 EXPORT_SYMBOL(fc_rport_terminate_io);