/*
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

extern mempool_t *cifs_mid_poolp;

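/*
 * The default mid callback: the submitting task sleeps in
 * wait_for_response(), so completing a synchronous mid just means waking
 * that task back up.
 */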
static void wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

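/*
 * Allocate a mid_q_entry for the request in smb_buffer and initialize it
 * with the default synchronous callback; returns NULL if allocation fails
 * or no server was passed in.
 */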
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cERROR(1, "Null TCP session in AllocMidQEntry");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;

	memset(temp, 0, sizeof(struct mid_q_entry));
	temp->mid = smb_buffer->Mid;	/* always LE */
	temp->pid = current->pid;
	temp->command = smb_buffer->Command;
	cFYI(1, "For smb_command %d", temp->command);
	/* do_gettimeofday(&temp->when_sent); */ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->midState = MID_REQUEST_ALLOCATED;
	return temp;
}

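/*
 * Free a mid and its response buffer. With CONFIG_CIFS_STATS2 enabled,
 * responses that took longer than a second are logged first.
 */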
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	unsigned long now;
#endif
	midEntry->midState = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->largeBuf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) &&
		    (midEntry->command != SMB_COM_LOCKING_ANDX)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

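/*
 * Unlink a mid from the server's pending_mid_q and free it.
 */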
static void delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);
	DeleteMidQEntry(mid);
}

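/*
 * Write the iovec array out on the server socket, retrying with increasing
 * delays when the socket returns -ENOSPC or -EAGAIN. If only part of an SMB
 * could be sent, the session is marked for reconnect so the server discards
 * the partial frame.
 */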
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	struct smb_hdr *smb_buffer = iov[0].iov_base;
	unsigned int len = iov[0].iov_len;
	unsigned int total_len;
	int first_vec = 0;
	unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
	struct socket *ssocket = server->ssocket;

	if (ssocket == NULL)
		return -ENOTSOCK; /* BB eventually add reconnect code here */

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	total_len = 0;
	for (i = 0; i < n_vec; i++)
		total_len += iov[i].iov_len;

	cFYI(1, "Sending smb: total_len %d", total_len);
	dump_smb(smb_buffer, len);

	i = 0;
	while (total_len) {
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, total_len);
		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
			i++;
			/* if blocking send we try 3 times, since each can block
			   for 5 seconds. For nonblocking we have to try more
			   but wait increasing amounts of time allowing time for
			   socket to clear. The overall time we wait in either
			   case to send on the socket is about 15 seconds.
			   Similarly we wait for 15 seconds for
			   a response from the server in SendReceive[2]
			   for the server to send a response back for
			   most types of requests (except SMB Write
			   past end of file which can be slow, and
			   blocking lock operations). NFS waits slightly longer
			   than CIFS, but this can make it take longer for
			   nonresponsive servers to be detected and 15 seconds
			   is more than enough time for modern networks to
			   send a packet. In most cases if we fail to send
			   after the retries we will kill the socket and
			   reconnect which may clear the network problem.
			*/
			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 seconds",
				       ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}
		if (rc < 0)
			break;

		if (rc == total_len) {
			total_len = 0;
			break;
		} else if (rc > total_len) {
			cERROR(1, "sent %d requested %d", rc, total_len);
			break;
		}
		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}
		total_len -= rc;
		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}
		i = 0; /* in case we get ENOSPC on the next send */
	}

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (%d remaining), terminating session",
			total_len);
		/* If we have only sent part of an SMB then the next SMB
		   could be taken as the remainder of this one. We need
		   to kill the socket so the server throws away the partial
		   SMB */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	/* Don't want to modify the buffer as a
	   side effect of this call. */
	smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);

	return rc;
}

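/*
 * Convenience wrapper around smb_sendv() for a request held in a single
 * buffer.
 */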
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;
	return smb_sendv(server, &iov, 1);
}

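/*
 * Throttle the number of simultaneous requests on the wire: block until
 * fewer than cifs_max_pending requests are in flight. Async ops (such as
 * oplock breaks) are never held up, and blocking lock ops are not counted
 * against the limit.
 */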
static int wait_for_free_request(struct TCP_Server_Info *server,
				 const int long_op)
{
	if (long_op == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		atomic_inc(&server->inFlight);
		return 0;
	}

	spin_lock(&GlobalMid_Lock);
	while (1) {
		if (atomic_read(&server->inFlight) >= cifs_max_pending) {
			spin_unlock(&GlobalMid_Lock);
#ifdef CONFIG_CIFS_STATS2
			atomic_inc(&server->num_waiters);
#endif
			wait_event(server->request_q,
				   atomic_read(&server->inFlight)
					< cifs_max_pending);
#ifdef CONFIG_CIFS_STATS2
			atomic_dec(&server->num_waiters);
#endif
			spin_lock(&GlobalMid_Lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&GlobalMid_Lock);
				return -ENOENT;
			}

			/* can not count locking commands against total
			   as they are allowed to block on server */

			/* update # of requests on the wire to server */
			if (long_op != CIFS_BLOCKING_OP)
				atomic_inc(&server->inFlight);
			spin_unlock(&GlobalMid_Lock);
			break;
		}
	}
	return 0;
}

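/*
 * Check that the session is in a usable state, then allocate a mid for
 * in_buf and queue it on the server's pending_mid_q.
 */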
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}
	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cFYI(1, "tcp session dead - return to caller to retry");
		return -EAGAIN;
	}

	if (ses->status != CifsGood) {
		/* check if SMB session is bad because we are setting it up */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}
	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

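/*
 * Sleep (killably) until the mid leaves MID_REQUEST_SUBMITTED, i.e. until
 * a response arrives or the mid is abandoned.
 */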
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_killable(server->response_q,
				    midQ->midState != MID_REQUEST_SUBMITTED);
	return error < 0 ? -ERESTARTSYS : 0;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		unsigned int nvec, mid_callback_t *callback, void *cbdata,
		bool ignore_pend)
{
	int rc;
	struct mid_q_entry *mid;
	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;

	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
	if (rc)
		return rc;

	/* enable signing if server requires it */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mutex_lock(&server->srv_mutex);
	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL) {
		mutex_unlock(&server->srv_mutex);
		return -ENOMEM;
	}

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out_err;
	}

	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&server->inSend);
#endif
	rc = smb_sendv(server, iov, nvec);
#ifdef CONFIG_CIFS_STATS2
	atomic_dec(&server->inSend);
	mid->when_sent = jiffies;
#endif
	mutex_unlock(&server->srv_mutex);
	if (rc)
		goto out_err;

	return rc;
out_err:
	delete_mid(mid);
	atomic_dec(&server->inFlight);
	wake_up(&server->request_q);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 struct smb_hdr *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = (char *)in_buf;
	iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
	return rc;
}

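/*
 * Pick up the result of a synchronous request from its mid state. Unless a
 * response was successfully received (in which case the caller still needs
 * the response buffer), the mid is freed here.
 */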
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
	     mid->mid, mid->midState);

	spin_lock(&GlobalMid_Lock);
	switch (mid->midState) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	default:
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
		       mid->mid, mid->midState);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);
	DeleteMidQEntry(mid);
	return rc;
}

/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
	       struct mid_q_entry *mid)
{
	int rc = 0;

	/* -4 for RFC1001 length and +2 for BCC field */
	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
	in_buf->Command = SMB_COM_NT_CANCEL;
	in_buf->WordCount = 0;
	put_bcc(0, in_buf);

	mutex_lock(&server->srv_mutex);
	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}
	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	mutex_unlock(&server->srv_mutex);

	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
	     in_buf->Mid, rc);
	return rc;
}

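/*
 * Verify the signature on a received response (when signing is in use)
 * and map the SMB status code in it to a POSIX error.
 */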
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	dump_smb(mid->resp_buf,
		 min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));

	/* convert the length into a more usable form */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		/* FIXME: add code to kill session */
		if (cifs_verify_signature(mid->resp_buf, server,
					  mid->sequence_number + 1) != 0)
			cERROR(1, "Unexpected SMB signature");
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

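/*
 * Send the request described by the iov array and wait for the response.
 * On success the response buffer is returned to the caller via iov[0]
 * (and *pRespBufType says whether it is a small or large buffer) unless
 * CIFS_NO_RESP was set in flags.
 */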
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
	     const int flags)
{
	int rc = 0;
	int long_op;
	struct mid_q_entry *midQ;
	struct smb_hdr *in_buf = iov[0].iov_base;

	long_op = flags & CIFS_TIMEOUT_MASK;
	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(in_buf);
		cERROR(1, "Null session");
		return -EIO;
	}
	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(in_buf);
		return -ENOENT;
	}

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	rc = wait_for_free_request(ses->server, long_op);
	if (rc) {
		cifs_small_buf_release(in_buf);
		return rc;
	}

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&ses->server->inSend);
#endif
	rc = smb_sendv(ses->server, iov, n_vec);
#ifdef CONFIG_CIFS_STATS2
	atomic_dec(&ses->server->inSend);
	midQ->when_sent = jiffies;
#endif
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	if (long_op == CIFS_ASYNC_OP) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(in_buf);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(in_buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	iov[0].iov_base = (char *)midQ->resp_buf;
	iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
	if (midQ->largeBuf)
		*pRespBufType = CIFS_LARGE_BUFFER;
	else
		*pRespBufType = CIFS_SMALL_BUFFER;

	rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);
	return rc;
}

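/*
 * Send a single SMB and copy the complete response into the caller's
 * out_buf, returning the frame length via *pbytes_returned.
 */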
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}
	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, long_op);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&ses->server->inSend);
#endif
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
#ifdef CONFIG_CIFS_STATS2
	atomic_dec(&ses->server->inSend);
	midQ->when_sent = jiffies;
#endif
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;
	if (long_op == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);
	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = GetNextMid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

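/*
 * Like SendReceive(), but for lock requests that may block indefinitely on
 * the server: the wait for the response is interruptible, and a signal
 * causes a cancel to be sent (NT_CANCEL for POSIX locks via
 * SMB_COM_TRANSACTION2, LOCKINGX_CANCEL_LOCK otherwise) before the system
 * call is restarted.
 */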
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}
	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&ses->server->inSend);
#endif
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
#ifdef CONFIG_CIFS_STATS2
	atomic_dec(&ses->server->inSend);
	midQ->when_sent = jiffies;
#endif
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->midState == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {
		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_nt_cancel(ses->server, in_buf, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_nt_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->midState == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}