adbb4b80d9e44ba92009931727399ad6f2d954c4
[firefly-linux-kernel-4.4.55.git] / drivers / scsi / isci / remote_node_context.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55
56 #include "host.h"
57 #include "isci.h"
58 #include "remote_device.h"
59 #include "remote_node_context.h"
60 #include "scu_event_codes.h"
61 #include "scu_task_context.h"
62
#undef C
#define C(a) (#a)
/**
 * rnc_state_name() - map an RNC state id to its printable name
 * @state: a value of enum scis_sds_remote_node_context_states
 *
 * RNC_STATES is an X-macro list; with C() defined to stringify its
 * argument, it expands to an array of state-name strings indexed by
 * state id.
 *
 * Return: the name string for @state.  No bounds check is performed;
 * callers pass values taken from the state machine's current_state_id.
 */
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
	static const char * const strings[] = RNC_STATES;

	return strings[state];
}
#undef C
72
73 /**
74  *
75  * @sci_rnc: The state of the remote node context object to check.
76  *
77  * This method will return true if the remote node context is in a READY state
78  * otherwise it will return false bool true if the remote node context is in
79  * the ready state. false if the remote node context is not in the ready state.
80  */
81 bool sci_remote_node_context_is_ready(
82         struct sci_remote_node_context *sci_rnc)
83 {
84         u32 current_state = sci_rnc->sm.current_state_id;
85
86         if (current_state == SCI_RNC_READY) {
87                 return true;
88         }
89
90         return false;
91 }
92
93 static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
94 {
95         if (id < ihost->remote_node_entries &&
96             ihost->device_table[id])
97                 return &ihost->remote_node_context_table[id];
98
99         return NULL;
100 }
101
102 static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
103 {
104         struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
105         struct domain_device *dev = idev->domain_dev;
106         int rni = sci_rnc->remote_node_index;
107         union scu_remote_node_context *rnc;
108         struct isci_host *ihost;
109         __le64 sas_addr;
110
111         ihost = idev->owning_port->owning_controller;
112         rnc = sci_rnc_by_id(ihost, rni);
113
114         memset(rnc, 0, sizeof(union scu_remote_node_context)
115                 * sci_remote_device_node_count(idev));
116
117         rnc->ssp.remote_node_index = rni;
118         rnc->ssp.remote_node_port_width = idev->device_port_width;
119         rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
120
121         /* sas address is __be64, context ram format is __le64 */
122         sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
123         rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
124         rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
125
126         rnc->ssp.nexus_loss_timer_enable = true;
127         rnc->ssp.check_bit               = false;
128         rnc->ssp.is_valid                = false;
129         rnc->ssp.is_remote_node_context  = true;
130         rnc->ssp.function_number         = 0;
131
132         rnc->ssp.arbitration_wait_time = 0;
133
134         if (dev_is_sata(dev)) {
135                 rnc->ssp.connection_occupancy_timeout =
136                         ihost->user_parameters.stp_max_occupancy_timeout;
137                 rnc->ssp.connection_inactivity_timeout =
138                         ihost->user_parameters.stp_inactivity_timeout;
139         } else {
140                 rnc->ssp.connection_occupancy_timeout  =
141                         ihost->user_parameters.ssp_max_occupancy_timeout;
142                 rnc->ssp.connection_inactivity_timeout =
143                         ihost->user_parameters.ssp_inactivity_timeout;
144         }
145
146         rnc->ssp.initial_arbitration_wait_time = 0;
147
148         /* Open Address Frame Parameters */
149         rnc->ssp.oaf_connection_rate = idev->connection_rate;
150         rnc->ssp.oaf_features = 0;
151         rnc->ssp.oaf_source_zone_group = 0;
152         rnc->ssp.oaf_more_compatibility_features = 0;
153 }
154
155 /**
156  *
157  * @sci_rnc:
158  * @callback:
159  * @callback_parameter:
160  *
161  * This method will setup the remote node context object so it will transition
162  * to its ready state.  If the remote node context is already setup to
163  * transition to its final state then this function does nothing. none
164  */
165 static void sci_remote_node_context_setup_to_resume(
166         struct sci_remote_node_context *sci_rnc,
167         scics_sds_remote_node_context_callback callback,
168         void *callback_parameter,
169         enum sci_remote_node_context_destination_state dest_param)
170 {
171         if (sci_rnc->destination_state != RNC_DEST_FINAL) {
172                 sci_rnc->destination_state = dest_param;
173                 if (callback != NULL) {
174                         sci_rnc->user_callback = callback;
175                         sci_rnc->user_cookie   = callback_parameter;
176                 }
177         }
178 }
179
180 static void sci_remote_node_context_setup_to_destroy(
181         struct sci_remote_node_context *sci_rnc,
182         scics_sds_remote_node_context_callback callback,
183         void *callback_parameter)
184 {
185         sci_rnc->destination_state = RNC_DEST_FINAL;
186         sci_rnc->user_callback     = callback;
187         sci_rnc->user_cookie       = callback_parameter;
188 }
189
190 /**
191  *
192  *
193  * This method just calls the user callback function and then resets the
194  * callback.
195  */
196 static void sci_remote_node_context_notify_user(
197         struct sci_remote_node_context *rnc)
198 {
199         if (rnc->user_callback != NULL) {
200                 (*rnc->user_callback)(rnc->user_cookie);
201
202                 rnc->user_callback = NULL;
203                 rnc->user_cookie = NULL;
204         }
205 }
206
207 static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
208 {
209         if ((rnc->destination_state == RNC_DEST_READY) ||
210             (rnc->destination_state == RNC_DEST_SUSPENDED_RESUME)) {
211                 rnc->destination_state = RNC_DEST_READY;
212                 sci_remote_node_context_resume(rnc, rnc->user_callback,
213                                                     rnc->user_cookie);
214         } else
215                 rnc->destination_state = RNC_DEST_UNSPECIFIED;
216 }
217
218 static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
219 {
220         union scu_remote_node_context *rnc_buffer;
221         struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
222         struct domain_device *dev = idev->domain_dev;
223         struct isci_host *ihost = idev->owning_port->owning_controller;
224
225         rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
226
227         rnc_buffer->ssp.is_valid = true;
228
229         if (dev_is_sata(dev) && dev->parent) {
230                 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
231         } else {
232                 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
233
234                 if (!dev->parent)
235                         sci_port_setup_transports(idev->owning_port,
236                                                   sci_rnc->remote_node_index);
237         }
238 }
239
240 static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
241 {
242         union scu_remote_node_context *rnc_buffer;
243         struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
244         struct isci_host *ihost = idev->owning_port->owning_controller;
245
246         rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
247
248         rnc_buffer->ssp.is_valid = false;
249
250         sci_remote_device_post_request(rnc_to_dev(sci_rnc),
251                                        SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
252 }
253
254 static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
255 {
256         struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
257
258         /* Check to see if we have gotten back to the initial state because
259          * someone requested to destroy the remote node context object.
260          */
261         if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
262                 rnc->destination_state = RNC_DEST_UNSPECIFIED;
263                 sci_remote_node_context_notify_user(rnc);
264         }
265 }
266
267 static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
268 {
269         struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
270
271         sci_remote_node_context_validate_context_buffer(sci_rnc);
272 }
273
274 static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
275 {
276         struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
277
278         /* Terminate all outstanding requests. */
279         sci_remote_device_terminate_requests(rnc_to_dev(rnc));
280         sci_remote_node_context_invalidate_context_buffer(rnc);
281 }
282
283 static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
284 {
285         struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
286         struct isci_remote_device *idev;
287         struct domain_device *dev;
288
289         idev = rnc_to_dev(rnc);
290         dev = idev->domain_dev;
291
292         /*
293          * For direct attached SATA devices we need to clear the TLCR
294          * NCQ to TCi tag mapping on the phy and in cases where we
295          * resume because of a target reset we also need to update
296          * the STPTLDARNI register with the RNi of the device
297          */
298         if (dev_is_sata(dev) && !dev->parent)
299                 sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
300
301         sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
302 }
303
/* On entry to READY: if a suspend (or suspend-then-resume) was queued
 * while the context was getting here, start it now; otherwise complete
 * the transition by notifying the user.
 */
static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	enum sci_remote_node_context_destination_state dest_select;
	/* Capture the callback/cookie before the suspend call below, which
	 * installs its own (NULL) callback and would clobber them.
	 */
	scics_sds_remote_node_context_callback usr_cb = rnc->user_callback;
	void *usr_param = rnc->user_cookie;
	int tell_user = 1;

	/* Consume the queued destination before acting on it. */
	dest_select = rnc->destination_state;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	if ((dest_select == RNC_DEST_SUSPENDED) ||
	    (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
		sci_remote_node_context_suspend(
			rnc, SCI_SOFTWARE_SUSPENSION,
			SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT, NULL, NULL);

		if (dest_select == RNC_DEST_SUSPENDED_RESUME) {
			/* Re-arm the resume with the captured callback. */
			sci_remote_node_context_resume(rnc, usr_cb, usr_param);
			tell_user = 0;  /* Wait until ready again. */
		}
	}
	if (tell_user && rnc->user_callback)
		sci_remote_node_context_notify_user(rnc);
}
329
330 static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
331 {
332         struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
333
334         sci_remote_node_context_continue_state_transitions(rnc);
335 }
336
337 static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
338 {
339         struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
340         struct isci_remote_device *idev = rnc_to_dev(rnc);
341         struct isci_host *ihost = idev->owning_port->owning_controller;
342
343         /* Terminate outstanding requests pending abort. */
344         sci_remote_device_abort_requests_pending_abort(idev);
345
346         wake_up(&ihost->eventq);
347         sci_remote_node_context_continue_state_transitions(rnc);
348 }
349
350 static void sci_remote_node_context_await_suspend_state_exit(
351         struct sci_base_state_machine *sm)
352 {
353         struct sci_remote_node_context *rnc
354                 = container_of(sm, typeof(*rnc), sm);
355
356         isci_dev_set_hang_detection_timeout(rnc_to_dev(rnc), 0);
357 }
358
/* RNC state machine dispatch table, indexed by
 * enum scis_sds_remote_node_context_states.  States listed without a
 * handler need no work on entry or exit.
 */
static const struct sci_base_state sci_remote_node_context_state_table[] = {
	[SCI_RNC_INITIAL] = {
		.enter_state = sci_remote_node_context_initial_state_enter,
	},
	[SCI_RNC_POSTING] = {
		.enter_state = sci_remote_node_context_posting_state_enter,
	},
	[SCI_RNC_INVALIDATING] = {
		.enter_state = sci_remote_node_context_invalidating_state_enter,
	},
	[SCI_RNC_RESUMING] = {
		.enter_state = sci_remote_node_context_resuming_state_enter,
	},
	[SCI_RNC_READY] = {
		.enter_state = sci_remote_node_context_ready_state_enter,
	},
	[SCI_RNC_TX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_suspended_state_enter,
	},
	[SCI_RNC_TX_RX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
	},
	[SCI_RNC_AWAIT_SUSPENSION] = {
		.exit_state = sci_remote_node_context_await_suspend_state_exit,
	},
};
385
386 void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
387                                             u16 remote_node_index)
388 {
389         memset(rnc, 0, sizeof(struct sci_remote_node_context));
390
391         rnc->remote_node_index = remote_node_index;
392         rnc->destination_state = RNC_DEST_UNSPECIFIED;
393
394         sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
395 }
396
397 enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
398                                                            u32 event_code)
399 {
400         enum scis_sds_remote_node_context_states state;
401         u32 next_state;
402
403         state = sci_rnc->sm.current_state_id;
404         switch (state) {
405         case SCI_RNC_POSTING:
406                 switch (scu_get_event_code(event_code)) {
407                 case SCU_EVENT_POST_RNC_COMPLETE:
408                         sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
409                         break;
410                 default:
411                         goto out;
412                 }
413                 break;
414         case SCI_RNC_INVALIDATING:
415                 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
416                         if (sci_rnc->destination_state == RNC_DEST_FINAL)
417                                 next_state = SCI_RNC_INITIAL;
418                         else
419                                 next_state = SCI_RNC_POSTING;
420                         sci_change_state(&sci_rnc->sm, next_state);
421                 } else {
422                         switch (scu_get_event_type(event_code)) {
423                         case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
424                         case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
425                                 /* We really dont care if the hardware is going to suspend
426                                  * the device since it's being invalidated anyway */
427                                 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
428                                         "%s: SCIC Remote Node Context 0x%p was "
429                                         "suspeneded by hardware while being "
430                                         "invalidated.\n", __func__, sci_rnc);
431                                 break;
432                         default:
433                                 goto out;
434                         }
435                 }
436                 break;
437         case SCI_RNC_RESUMING:
438                 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
439                         sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
440                 } else {
441                         switch (scu_get_event_type(event_code)) {
442                         case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
443                         case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
444                                 /* We really dont care if the hardware is going to suspend
445                                  * the device since it's being resumed anyway */
446                                 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
447                                         "%s: SCIC Remote Node Context 0x%p was "
448                                         "suspeneded by hardware while being resumed.\n",
449                                         __func__, sci_rnc);
450                                 break;
451                         default:
452                                 goto out;
453                         }
454                 }
455                 break;
456         case SCI_RNC_READY:
457                 switch (scu_get_event_type(event_code)) {
458                 case SCU_EVENT_TL_RNC_SUSPEND_TX:
459                         sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
460                         sci_rnc->suspend_type = scu_get_event_type(event_code);
461                         break;
462                 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
463                         sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
464                         sci_rnc->suspend_type = scu_get_event_type(event_code);
465                         break;
466                 default:
467                         goto out;
468                 }
469                 break;
470         case SCI_RNC_AWAIT_SUSPENSION:
471                 switch (scu_get_event_type(event_code)) {
472                 case SCU_EVENT_TL_RNC_SUSPEND_TX:
473                         next_state = SCI_RNC_TX_SUSPENDED;
474                         break;
475                 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
476                         next_state = SCI_RNC_TX_RX_SUSPENDED;
477                         break;
478                 default:
479                         goto out;
480                 }
481                 if (sci_rnc->suspend_type == scu_get_event_type(event_code))
482                         sci_change_state(&sci_rnc->sm, next_state);
483                 break;
484         default:
485                 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
486                          "%s: invalid state: %s\n", __func__,
487                          rnc_state_name(state));
488                 return SCI_FAILURE_INVALID_STATE;
489         }
490         return SCI_SUCCESS;
491
492  out:
493         dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
494                  "%s: code: %#x state: %s\n", __func__, event_code,
495                  rnc_state_name(state));
496         return SCI_FAILURE;
497
498 }
499
/**
 * sci_remote_node_context_destruct() - tear down a remote node context
 * @sci_rnc: remote node context to destroy
 * @cb_fn: callback invoked once destruction completes
 * @cb_p: cookie passed to @cb_fn
 *
 * Arms the context for its final (destroy) destination and, when it is
 * not already invalidating, starts the invalidation.
 *
 * Return: SCI_SUCCESS when the destruct was started or the context is
 * already destroyed; SCI_FAILURE_INVALID_STATE for an unknown state.
 */
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
						      scics_sds_remote_node_context_callback cb_fn,
						      void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
		/* Already invalidating: just record the final destination
		 * and the caller's completion callback.
		 */
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		return SCI_SUCCESS;
	case SCI_RNC_INITIAL:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		/* A destruct request cannot fail here: the context is
		 * already in its initial (destroyed) state, so report
		 * success even though there is nothing to do.
		 */
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
536
/**
 * sci_remote_node_context_suspend() - suspend the remote node context
 * @sci_rnc: remote node context to suspend
 * @suspend_reason: SCI_SOFTWARE_SUSPENSION or a hardware-initiated reason
 * @suspend_type: SCU suspension event type requested/expected
 * @cb_fn: callback invoked once the suspension completes (may be NULL)
 * @cb_p: cookie passed to @cb_fn
 *
 * Return: SCI_SUCCESS when the context is already suspended or the
 * suspension has been started/queued; SCI_FAILURE_INVALID_STATE when the
 * context cannot be suspended from its current state (e.g. it is being
 * destroyed).
 */
enum sci_status sci_remote_node_context_suspend(
			struct sci_remote_node_context *sci_rnc,
			enum sci_remote_node_suspension_reasons suspend_reason,
			u32 suspend_type,
			scics_sds_remote_node_context_callback cb_fn,
			void *cb_p)
{
	enum scis_sds_remote_node_context_states state
		= sci_rnc->sm.current_state_id;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	enum sci_status status = SCI_FAILURE_INVALID_STATE;
	enum sci_remote_node_context_destination_state dest_param =
		RNC_DEST_UNSPECIFIED;

	dev_dbg(scirdev_to_dev(idev),
		"%s: current state %d, current suspend_type %x dest state %d,"
			" arg suspend_reason %d, arg suspend_type %x",
		__func__, state, sci_rnc->suspend_type,
		sci_rnc->destination_state, suspend_reason,
		suspend_type);

	/* Disable automatic state continuations if explicitly suspending. */
	if ((suspend_reason != SCI_SOFTWARE_SUSPENSION) ||
	    (sci_rnc->destination_state == RNC_DEST_FINAL))
		dest_param = sci_rnc->destination_state;

	switch (state) {
	case SCI_RNC_RESUMING:
		break;  /* The RNC has been posted, so start the suspend. */
	case SCI_RNC_READY:
		break;
	case SCI_RNC_INVALIDATING:
		/* A context being destroyed cannot be suspended. */
		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
			dev_warn(scirdev_to_dev(idev),
				 "%s: already destroying %p\n",
				 __func__, sci_rnc);
			return SCI_FAILURE_INVALID_STATE;
		}
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_POSTING:
		/* Set the destination state to AWAIT - this signals the
		 * entry into the SCI_RNC_READY state that a suspension
		 * needs to be done immediately.
		 */
		sci_rnc->destination_state = RNC_DEST_SUSPENDED;
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
		/* Already suspended in the requested direction: no-op. */
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_TX_RX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		/* An in-flight TX_RX suspension subsumes any request;
		 * otherwise a duplicate request is already covered.
		 */
		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
		    || (suspend_type == sci_rnc->suspend_type))
			return SCI_SUCCESS;
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	sci_rnc->destination_state = dest_param;
	sci_rnc->user_callback = cb_fn;
	sci_rnc->user_cookie   = cb_p;
	sci_rnc->suspend_type  = suspend_type;

	if (status == SCI_SUCCESS) { /* Already in the destination state? */
		struct isci_host *ihost = idev->owning_port->owning_controller;

		sci_remote_node_context_notify_user(sci_rnc);
		wake_up_all(&ihost->eventq); /* Let observers look. */
		return SCI_SUCCESS;
	}
	if (suspend_reason == SCI_SOFTWARE_SUSPENSION) {
		/* Arm hang detection so an unresponsive device is noticed
		 * while the software suspend is outstanding.
		 */
		isci_dev_set_hang_detection_timeout(idev, 0x00000001);
		sci_remote_device_post_request(
			idev, SCI_SOFTWARE_SUSPEND_CMD);
	}
	if (state != SCI_RNC_AWAIT_SUSPENSION)
		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);

	return SCI_SUCCESS;
}
625
/**
 * sci_remote_node_context_resume() - drive the RNC toward the ready state
 * @sci_rnc: remote node context to resume
 * @cb_fn: callback invoked once the context reaches ready (may be NULL)
 * @cb_p: cookie passed to @cb_fn
 *
 * Return: SCI_SUCCESS when the resume was started or queued;
 * SCI_FAILURE_INVALID_STATE otherwise.
 */
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
						    scics_sds_remote_node_context_callback cb_fn,
						    void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INITIAL:
		/* Never posted: build the hardware buffer and post it. */
		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			return SCI_FAILURE_INVALID_STATE;

		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
							RNC_DEST_READY);
		sci_remote_node_context_construct_buffer(sci_rnc);
		sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_RESUMING:
		/* We are still waiting to post when a resume was requested. */
		switch (sci_rnc->destination_state) {
		case RNC_DEST_SUSPENDED:
		case RNC_DEST_SUSPENDED_RESUME:
			/* Previously waiting to suspend after posting.  Now
			 * continue onto resumption.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_SUSPENDED_RESUME);
			break;
		default:
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_READY);
			break;
		}
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED: {
		struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
		struct domain_device *dev = idev->domain_dev;

		/* If this is an expander attached SATA device we must
		 * invalidate and repost the RNC since this is the only way
		 * to clear the TCi to NCQ tag mapping table for the RNi.
		 * All other device types we can just resume.
		 */
		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
							RNC_DEST_READY);

		if (dev_is_sata(dev) && dev->parent)
			sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		else
			sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
		return SCI_SUCCESS;
	}
	case SCI_RNC_AWAIT_SUSPENSION:
		/* Queue the resume to run once the suspension completes. */
		sci_remote_node_context_setup_to_resume(
			sci_rnc, cb_fn, cb_p,
			RNC_DEST_SUSPENDED_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
695
696 enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
697                                                              struct isci_request *ireq)
698 {
699         enum scis_sds_remote_node_context_states state;
700
701         state = sci_rnc->sm.current_state_id;
702
703         switch (state) {
704         case SCI_RNC_READY:
705                 return SCI_SUCCESS;
706         case SCI_RNC_TX_SUSPENDED:
707         case SCI_RNC_TX_RX_SUSPENDED:
708         case SCI_RNC_AWAIT_SUSPENSION:
709                 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
710                          "%s: invalid state %s\n", __func__,
711                          rnc_state_name(state));
712                 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
713         default:
714                 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
715                         "%s: invalid state %s\n", __func__,
716                         rnc_state_name(state));
717                 return SCI_FAILURE_INVALID_STATE;
718         }
719 }
720
721 enum sci_status sci_remote_node_context_start_task(
722         struct sci_remote_node_context *sci_rnc,
723         struct isci_request *ireq,
724         scics_sds_remote_node_context_callback cb_fn,
725         void *cb_p)
726 {
727         enum sci_status status = sci_remote_node_context_resume(sci_rnc,
728                                                                 cb_fn, cb_p);
729         if (status != SCI_SUCCESS)
730                 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
731                         "%s: resume failed: %d\n", __func__, status);
732         return status;
733 }
734
735 int sci_remote_node_context_is_safe_to_abort(
736         struct sci_remote_node_context *sci_rnc)
737 {
738         enum scis_sds_remote_node_context_states state;
739
740         state = sci_rnc->sm.current_state_id;
741         switch (state) {
742         case SCI_RNC_INVALIDATING:
743         case SCI_RNC_TX_RX_SUSPENDED:
744                 return 1;
745         case SCI_RNC_POSTING:
746         case SCI_RNC_RESUMING:
747         case SCI_RNC_READY:
748         case SCI_RNC_TX_SUSPENDED:
749         case SCI_RNC_AWAIT_SUSPENSION:
750         case SCI_RNC_INITIAL:
751                 return 0;
752         default:
753                 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
754                          "%s: invalid state %d\n", __func__, state);
755                 return 0;
756         }
757 }