
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN        4096
#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
                               struct ceph_osd_request *req);
static void __unregister_request(struct ceph_osd_client *osdc,
                                 struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
                                        struct ceph_osd_request *req);
static void __enqueue_request(struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
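
/*
 * Bird's-eye view of a typical request (an illustrative sketch only,
 * not part of the original file; error handling is omitted, and
 * ceph_osdc_build_request(), ceph_osdc_start_request() and
 * ceph_osdc_wait_request() live further down in this file):
 *
 *      req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
 *                                  CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *                                  NULL, truncate_seq, truncate_size, false);
 *      osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *      ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
 *      ceph_osdc_start_request(osdc, req, false);
 *      ceph_osdc_wait_request(osdc, req);
 *      ceph_osdc_put_request(req);
 */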

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        int r;

        /* object extent? */
        r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, objlen);
        if (r < 0)
                return r;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

        return 0;
}
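
/*
 * Worked example (assuming the default layout of 4 MB objects and no
 * striping): a request with off=6M and *plen=4M lands in object 1,
 * which covers bytes 4M..8M of the file, so *objnum=1 and *objoff=2M.
 * Only 2 MB remain in that object, so *objlen and *plen are both
 * trimmed to 2M; the caller must issue a separate request for the tail.
 */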

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                        struct bio *bio, size_t bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio = bio;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)           \
        ({                                              \
                BUG_ON((whch) >= (oreq)->r_num_ops);    \
                &(oreq)->r_ops[whch].typ.fld;           \
        })

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

struct ceph_osd_data *
osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, cls, response_data);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data);    /* ??? */

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
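
/*
 * A minimal sketch of how the helper above is typically used (assumes
 * the caller sized the vector with calc_pages_for()):
 *
 *      num_pages = calc_pages_for(0, length);
 *      pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *
 *      osd_req_op_extent_osd_data_pages(req, 0, pages, length, 0,
 *                                       false, true);
 *
 * With own_pages=true the request takes ownership of the vector and
 * releases it via ceph_release_page_vector() when the request is torn
 * down (see ceph_osd_data_release() below).
 */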

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                        unsigned int which, struct bio *bio, size_t bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        default:
                break;
        }
}

/*
 * requests
 */
static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!list_empty(&req->r_req_lru_item));
        WARN_ON(!list_empty(&req->r_osd_item));
        WARN_ON(!list_empty(&req->r_linger_item));
        WARN_ON(!list_empty(&req->r_linger_osd_item));
        WARN_ON(req->r_osd);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply) {
                ceph_msg_revoke_incoming(req->r_reply);
                ceph_msg_put(req->r_reply);
        }

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        ceph_put_snap_context(req->r_snapc);
        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else
                kmem_cache_free(ceph_osd_request_cache, req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             atomic_read(&req->r_kref.refcount));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             atomic_read(&req->r_kref.refcount));
        kref_put(&req->r_kref, ceph_osdc_release_request);
}
EXPORT_SYMBOL(ceph_osdc_put_request);

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;
        struct ceph_msg *msg;
        size_t msg_size;

        BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX);
        BUG_ON(num_ops > CEPH_OSD_MAX_OP);

        msg_size = 4 + 4 + 8 + 8 + 4 + 8;
        msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
        msg_size += 1 + 8 + 4 + 4;     /* pg_t */
        msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */
        msg_size += 2 + num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8;  /* snapid */
        msg_size += 8;  /* snap_seq */
        msg_size += 8 * (snapc ? snapc->num_snaps : 0);  /* snaps */
        msg_size += 4;

        if (use_mempool) {
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
                memset(req, 0, sizeof(*req));
        } else {
                req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags);
        }
        if (req == NULL)
                return NULL;

        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
        RB_CLEAR_NODE(&req->r_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);
        INIT_LIST_HEAD(&req->r_linger_item);
        INIT_LIST_HEAD(&req->r_linger_osd_item);
        INIT_LIST_HEAD(&req->r_req_lru_item);
        INIT_LIST_HEAD(&req->r_osd_item);

        req->r_base_oloc.pool = -1;
        req->r_target_oloc.pool = -1;

        /* create reply message */
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
                                   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
        if (!msg) {
                ceph_osdc_put_request(req);
                return NULL;
        }
        req->r_reply = msg;

        /* create request message; allow space for oid */
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
        if (!msg) {
                ceph_osdc_put_request(req);
                return NULL;
        }

        memset(msg->front.iov_base, 0, msg->front.iov_len);

        req->r_request = msg;

        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}
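
/*
 * __CEPH_FORALL_OSD_OPS (defined in rados.h) is an X-macro that invokes
 * GENERATE_CASE once per known opcode, so the switch above expands to
 * one case per op, along the lines of:
 *
 *      case CEPH_OSD_OP_READ: return true;
 *      case CEPH_OSD_OP_WRITE: return true;
 *      ...
 */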

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                                u16 opcode)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode)
{
        (void)_osd_req_op_init(osd_req, which, opcode);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE)
                payload_len += length;

        op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        op->payload_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        u16 opcode, const char *class, const char *method)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        BUG_ON(opcode != CEPH_OSD_OP_CALL);

        pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
        BUG_ON(!pagelist);
        ceph_pagelist_init(pagelist);

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->cls.argc = 0;       /* currently unused */

        op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
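
/*
 * A minimal sketch of driving an object class method with the helper
 * above (class/method names here are made up; rbd, for example, calls
 * methods of its "rbd" class this way):
 *
 *      osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "myclass", "mymethod");
 *      osd_req_op_cls_request_data_pages(req, 0, in_pages, in_len, 0,
 *                                        false, false);
 *      osd_req_op_cls_response_data_pages(req, 0, out_pages, out_len, 0,
 *                                         false, false);
 */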

void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 cookie, u64 version, int flag)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);

        BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);

        op->watch.cookie = cookie;
        op->watch.ver = version;
        if (opcode == CEPH_OSD_OP_WATCH && flag)
                op->watch.flag = (u8)1;
}
EXPORT_SYMBOL(osd_req_op_watch_init);

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}

static u64 osd_req_encode_op(struct ceph_osd_request *req,
                              struct ceph_osd_op *dst, unsigned int which)
{
        struct ceph_osd_req_op *src;
        struct ceph_osd_data *osd_data;
        u64 request_data_len = 0;
        u64 data_length;

        BUG_ON(which >= req->r_num_ops);
        src = &req->r_ops[which];
        if (WARN_ON(!osd_req_opcode_valid(src->op))) {
                pr_err("unrecognized osd opcode %d\n", src->op);

                return 0;
        }

        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                osd_data = &src->raw_data_in;
                ceph_osdc_msg_data_add(req->r_reply, osd_data);
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_DELETE:
        case CEPH_OSD_OP_TRUNCATE:
                if (src->op == CEPH_OSD_OP_WRITE)
                        request_data_len = src->extent.length;
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                osd_data = &src->extent.osd_data;
                if (src->op == CEPH_OSD_OP_WRITE)
                        ceph_osdc_msg_data_add(req->r_request, osd_data);
                else
                        ceph_osdc_msg_data_add(req->r_reply, osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                osd_data = &src->cls.request_info;
                ceph_osdc_msg_data_add(req->r_request, osd_data);
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST);
                request_data_len = osd_data->pagelist->length;

                osd_data = &src->cls.request_data;
                data_length = ceph_osd_data_length(osd_data);
                if (data_length) {
                        BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE);
                        dst->cls.indata_len = cpu_to_le32(data_length);
                        ceph_osdc_msg_data_add(req->r_request, osd_data);
                        src->payload_len += data_length;
                        request_data_len += data_length;
                }
                osd_data = &src->cls.response_data;
                ceph_osdc_msg_data_add(req->r_reply, osd_data);
                break;
        case CEPH_OSD_OP_STARTSYNC:
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(src->watch.ver);
                dst->watch.flag = src->watch.flag;
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->payload_len);

        return request_data_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @num_ops > 1, a second 'startsync' op is included so that the osd
 * will flush data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        u32 object_size;
        u64 object_base;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req)
                return ERR_PTR(-ENOMEM);

        req->r_flags = flags;

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r < 0) {
                ceph_osdc_put_request(req);
                return ERR_PTR(r);
        }

        object_size = le32_to_cpu(layout->fl_object_size);
        object_base = off - objoff;
        if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                if (truncate_size <= object_base) {
                        truncate_size = 0;
                } else {
                        truncate_size -= object_base;
                        if (truncate_size > object_size)
                                truncate_size = object_size;
                }
        }

        osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
                                truncate_size, truncate_seq);

        /*
         * A second op in the ops array means the caller wants to
         * also include a 'startsync' command so that the
         * osd will flush data quickly.
         */
        if (num_ops > 1)
                osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);

        req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);

        snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name),
                 "%llx.%08llx", vino.ino, objnum);
        req->r_base_oid.name_len = strlen(req->r_base_oid.name);

        return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);
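
/*
 * The object name built above is "<inode number in hex>.<object number,
 * zero-padded to 8 hex digits>"; for example inode 0x10000000000 and
 * objnum 1 yield "10000000000.00000001".
 */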

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
                             struct ceph_osd_request *new)
{
        struct rb_node **p = &osdc->requests.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd_request *req = NULL;

        while (*p) {
                parent = *p;
                req = rb_entry(parent, struct ceph_osd_request, r_node);
                if (new->r_tid < req->r_tid)
                        p = &(*p)->rb_left;
                else if (new->r_tid > req->r_tid)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->r_node, parent, p);
        rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
                                                 u64 tid)
{
        struct ceph_osd_request *req;
        struct rb_node *n = osdc->requests.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_osd_request, r_node);
                if (tid < req->r_tid)
                        n = n->rb_left;
                else if (tid > req->r_tid)
                        n = n->rb_right;
                else
                        return req;
        }
        return NULL;
}

static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
                    u64 tid)
{
        struct ceph_osd_request *req;
        struct rb_node *n = osdc->requests.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_osd_request, r_node);
                if (tid < req->r_tid) {
                        if (!n->rb_left)
                                return req;
                        n = n->rb_left;
                } else if (tid > req->r_tid) {
                        n = n->rb_right;
                } else {
                        return req;
                }
        }
        return NULL;
}

static void __kick_linger_request(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_osd *osd = req->r_osd;

        /*
         * Linger requests need to be resent with a new tid to avoid
         * the dup op detection logic on the OSDs.  Achieve this with
         * a re-register dance instead of open-coding.
         */
        ceph_osdc_get_request(req);
        if (!list_empty(&req->r_linger_item))
                __unregister_linger_request(osdc, req);
        else
                __unregister_request(osdc, req);
        __register_request(osdc, req);
        ceph_osdc_put_request(req);

        /*
         * Unless request has been registered as both normal and
         * lingering, __unregister{,_linger}_request clears r_osd.
         * However, here we need to preserve r_osd to make sure we
         * requeue on the same OSD.
         */
        WARN_ON(req->r_osd || !osd);
        req->r_osd = osd;

        dout("%s requeueing %p tid %llu\n", __func__, req, req->r_tid);
        __enqueue_request(req);
}

/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
                                struct ceph_osd *osd)
{
        struct ceph_osd_request *req, *nreq;
        LIST_HEAD(resend);
        LIST_HEAD(resend_linger);
        int err;

        dout("%s osd%d\n", __func__, osd->o_osd);
        err = __reset_osd(osdc, osd);
        if (err)
                return;

        /*
         * Build up a list of requests to resend by traversing the
         * osd's list of requests.  Requests for a given object are
         * sent in tid order, and that is also the order they're
         * kept on this list.  Therefore all requests that are in
         * flight will be found first, followed by all requests that
         * have not yet been sent.  And to resend requests while
         * preserving this order we will want to put any sent
         * requests back on the front of the osd client's unsent
         * list.
         *
         * So we build a separate ordered list of already-sent
         * requests for the affected osd and splice it onto the
         * front of the osd client's unsent list.  Once we've seen a
         * request that has not yet been sent we're done.  Those
         * requests are already sitting right where they belong.
         */
        list_for_each_entry(req, &osd->o_requests, r_osd_item) {
                if (!req->r_sent)
                        break;

                if (!req->r_linger) {
                        dout("%s requeueing %p tid %llu\n", __func__, req,
                             req->r_tid);
                        list_move_tail(&req->r_req_lru_item, &resend);
                        req->r_flags |= CEPH_OSD_FLAG_RETRY;
                } else {
                        list_move_tail(&req->r_req_lru_item, &resend_linger);
                }
        }
        list_splice(&resend, &osdc->req_unsent);

        /*
         * Both registered and not yet registered linger requests are
         * enqueued with a new tid on the same OSD.  We add/move them
         * to req_unsent/o_requests at the end to keep things in tid
         * order.
         */
        list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
                                 r_linger_osd_item) {
                WARN_ON(!list_empty(&req->r_req_lru_item));
                __kick_linger_request(req);
        }

        list_for_each_entry_safe(req, nreq, &resend_linger, r_req_lru_item)
                __kick_linger_request(req);
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;

        if (!osd)
                return;
        dout("osd_reset osd%d\n", osd->o_osd);
        osdc = osd->o_osdc;
        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        __kick_osd_requests(osdc, osd);
        __send_queued(osdc);
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        osd = kzalloc(sizeof(*osd), GFP_NOFS);
        if (!osd)
                return NULL;

        atomic_set(&osd->o_ref, 1);
        osd->o_osdc = osdc;
        osd->o_osd = onum;
        RB_CLEAR_NODE(&osd->o_node);
        INIT_LIST_HEAD(&osd->o_requests);
        INIT_LIST_HEAD(&osd->o_linger_requests);
        INIT_LIST_HEAD(&osd->o_osd_lru);
        osd->o_incarnation = 1;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        INIT_LIST_HEAD(&osd->o_keepalive_item);
        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (atomic_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
                     atomic_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}
static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
        if (atomic_dec_and_test(&osd->o_ref)) {
                struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

                /* the authorizer may never have been set up (e.g. with
                 * "none" auth); free the osd struct either way so the
                 * last reference doesn't leak it */
                if (osd->o_auth.authorizer)
                        ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
                kfree(osd);
        }
}

/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        dout("__remove_osd %p\n", osd);
        WARN_ON(!list_empty(&osd->o_requests));
        WARN_ON(!list_empty(&osd->o_linger_requests));

        rb_erase(&osd->o_node, &osdc->osds);
        list_del_init(&osd->o_osd_lru);
        ceph_con_close(&osd->o_con);
        put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
        dout("%s %p\n", __func__, osdc);
        mutex_lock(&osdc->request_mutex);
        while (!RB_EMPTY_ROOT(&osdc->osds)) {
                struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
                                                struct ceph_osd, o_node);
                __remove_osd(osdc, osd);
        }
        mutex_unlock(&osdc->request_mutex);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
                              struct ceph_osd *osd)
{
        dout("%s %p\n", __func__, osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void maybe_move_osd_to_lru(struct ceph_osd_client *osdc,
                                  struct ceph_osd *osd)
{
        dout("%s %p\n", __func__, osd);

        if (list_empty(&osd->o_requests) &&
            list_empty(&osd->o_linger_requests))
                __move_osd_to_lru(osdc, osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        dout("__remove_osd_from_lru %p\n", osd);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
}
static void remove_old_osds(struct ceph_osd_client *osdc)
{
        struct ceph_osd *osd, *nosd;

        dout("%s %p\n", __func__, osdc);
        mutex_lock(&osdc->request_mutex);
        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
                if (time_before(jiffies, osd->lru_ttl))
                        break;
                __remove_osd(osdc, osd);
        }
        mutex_unlock(&osdc->request_mutex);
}

/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
        if (list_empty(&osd->o_requests) &&
            list_empty(&osd->o_linger_requests)) {
                __remove_osd(osdc, osd);

                return -ENODEV;
        }

        peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
                        !ceph_con_opened(&osd->o_con)) {
                struct ceph_osd_request *req;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
                list_for_each_entry(req, &osd->o_requests, r_osd_item)
                        req->r_stamp = jiffies;

                return -EAGAIN;
        }

        ceph_con_close(&osd->o_con);
        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
        osd->o_incarnation++;

        return 0;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
        struct rb_node **p = &osdc->osds.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd *osd = NULL;

        dout("__insert_osd %p osd%d\n", new, new->o_osd);
        while (*p) {
                parent = *p;
                osd = rb_entry(parent, struct ceph_osd, o_node);
                if (new->o_osd < osd->o_osd)
                        p = &(*p)->rb_left;
                else if (new->o_osd > osd->o_osd)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->o_node, parent, p);
        rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
        struct ceph_osd *osd;
        struct rb_node *n = osdc->osds.rb_node;

        while (n) {
                osd = rb_entry(n, struct ceph_osd, o_node);
                if (o < osd->o_osd)
                        n = n->rb_left;
                else if (o > osd->o_osd)
                        n = n->rb_right;
                else
                        return osd;
        }
        return NULL;
}

static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
        schedule_delayed_work(&osdc->timeout_work,
                        osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
        cancel_delayed_work(&osdc->timeout_work);
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
                               struct ceph_osd_request *req)
{
        req->r_tid = ++osdc->last_tid;
        req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
        dout("__register_request %p tid %lld\n", req, req->r_tid);
        __insert_request(osdc, req);
        ceph_osdc_get_request(req);
        osdc->num_requests++;
        if (osdc->num_requests == 1) {
                dout(" first request, scheduling timeout\n");
                __schedule_osd_timeout(osdc);
        }
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
                                 struct ceph_osd_request *req)
{
        if (RB_EMPTY_NODE(&req->r_node)) {
                dout("__unregister_request %p tid %lld not registered\n",
                        req, req->r_tid);
                return;
        }

        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
        rb_erase(&req->r_node, &osdc->requests);
        RB_CLEAR_NODE(&req->r_node);
        osdc->num_requests--;

        if (req->r_osd) {
                /* make sure the original request isn't in flight. */
                ceph_msg_revoke(req->r_request);

                list_del_init(&req->r_osd_item);
                maybe_move_osd_to_lru(osdc, req->r_osd);
                if (list_empty(&req->r_linger_osd_item))
                        req->r_osd = NULL;
        }

        list_del_init(&req->r_req_lru_item);
        ceph_osdc_put_request(req);

        if (osdc->num_requests == 0) {
                dout(" no requests, canceling timeout\n");
                __cancel_osd_timeout(osdc);
        }
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
        if (req->r_sent && req->r_osd) {
                ceph_msg_revoke(req->r_request);
                req->r_sent = 0;
        }
}

static void __register_linger_request(struct ceph_osd_client *osdc,
                                    struct ceph_osd_request *req)
{
        dout("%s %p tid %llu\n", __func__, req, req->r_tid);
        WARN_ON(!req->r_linger);

        ceph_osdc_get_request(req);
        list_add_tail(&req->r_linger_item, &osdc->req_linger);
        if (req->r_osd)
                list_add_tail(&req->r_linger_osd_item,
                              &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
                                        struct ceph_osd_request *req)
{
        WARN_ON(!req->r_linger);

        if (list_empty(&req->r_linger_item)) {
                dout("%s %p tid %llu not registered\n", __func__, req,
                     req->r_tid);
                return;
        }

        dout("%s %p tid %llu\n", __func__, req, req->r_tid);
        list_del_init(&req->r_linger_item);

        if (req->r_osd) {
                list_del_init(&req->r_linger_osd_item);
                maybe_move_osd_to_lru(osdc, req->r_osd);
                if (list_empty(&req->r_osd_item))
                        req->r_osd = NULL;
        }

        list_del_init(&req->r_req_lru_item); /* can be on notarget */
        ceph_osdc_put_request(req);
}

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
                                  struct ceph_osd_request *req)
{
        if (!req->r_linger) {
                dout("set_request_linger %p\n", req);
                req->r_linger = 1;
        }
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 *
 * Caller should hold map_sem for read.
 */
static bool __req_should_be_paused(struct ceph_osd_client *osdc,
                                   struct ceph_osd_request *req)
{
        bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
        bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
                ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
        return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) ||
                (req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr);
}

/*
 * Calculate mapping of a request to a PG.  Takes tiering into account.
 */
static int __calc_request_pg(struct ceph_osdmap *osdmap,
                             struct ceph_osd_request *req,
                             struct ceph_pg *pg_out)
{
        bool need_check_tiering;

        need_check_tiering = false;
        if (req->r_target_oloc.pool == -1) {
                req->r_target_oloc = req->r_base_oloc; /* struct */
                need_check_tiering = true;
        }
        if (req->r_target_oid.name_len == 0) {
                ceph_oid_copy(&req->r_target_oid, &req->r_base_oid);
                need_check_tiering = true;
        }

        if (need_check_tiering &&
            (req->r_flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
                struct ceph_pg_pool_info *pi;

                pi = ceph_pg_pool_by_id(osdmap, req->r_target_oloc.pool);
                if (pi) {
                        if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
                            pi->read_tier >= 0)
                                req->r_target_oloc.pool = pi->read_tier;
                        if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
                            pi->write_tier >= 0)
                                req->r_target_oloc.pool = pi->write_tier;
                }
                /* !pi is caught in ceph_oloc_oid_to_pg() */
        }

        return ceph_oloc_oid_to_pg(osdmap, &req->r_target_oloc,
                                   &req->r_target_oid, pg_out);
}
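
/*
 * Concrete example of the tiering logic above: if the base pool (say
 * pool 3) has a cache tier configured so that its pg_pool_info has
 * read_tier 5, a read is redirected to pool 5 before the oid -> pg
 * mapping is computed; CEPH_OSD_FLAG_IGNORE_OVERLAY suppresses the
 * redirect.
 */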
1326
1327 static void __enqueue_request(struct ceph_osd_request *req)
1328 {
1329         struct ceph_osd_client *osdc = req->r_osdc;
1330
1331         dout("%s %p tid %llu to osd%d\n", __func__, req, req->r_tid,
1332              req->r_osd ? req->r_osd->o_osd : -1);
1333
1334         if (req->r_osd) {
1335                 __remove_osd_from_lru(req->r_osd);
1336                 list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
1337                 list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
1338         } else {
1339                 list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
1340         }
1341 }
1342
1343 /*
1344  * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
1345  * (as needed), and set the request r_osd appropriately.  If there is
1346  * no up osd, set r_osd to NULL.  Move the request to the appropriate list
1347  * (unsent, homeless) or leave on in-flight lru.
1348  *
1349  * Return 0 if unchanged, 1 if changed, or negative on error.
1350  *
1351  * Caller should hold map_sem for read and request_mutex.
1352  */
1353 static int __map_request(struct ceph_osd_client *osdc,
1354                          struct ceph_osd_request *req, int force_resend)
1355 {
1356         struct ceph_pg pgid;
1357         int acting[CEPH_PG_MAX_SIZE];
1358         int num, o;
1359         int err;
1360         bool was_paused;
1361
1362         dout("map_request %p tid %lld\n", req, req->r_tid);
1363
1364         err = __calc_request_pg(osdc->osdmap, req, &pgid);
1365         if (err) {
1366                 list_move(&req->r_req_lru_item, &osdc->req_notarget);
1367                 return err;
1368         }
1369         req->r_pgid = pgid;
1370
1371         num = ceph_calc_pg_acting(osdc->osdmap, pgid, acting, &o);
1372         if (num < 0)
1373                 num = 0;
1374
1375         was_paused = req->r_paused;
1376         req->r_paused = __req_should_be_paused(osdc, req);
1377         if (was_paused && !req->r_paused)
1378                 force_resend = 1;
1379
1380         if ((!force_resend &&
1381              req->r_osd && req->r_osd->o_osd == o &&
1382              req->r_sent >= req->r_osd->o_incarnation &&
1383              req->r_num_pg_osds == num &&
1384              memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
1385             (req->r_osd == NULL && o == -1) ||
1386             req->r_paused)
1387                 return 0;  /* no change */
1388
1389         dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
1390              req->r_tid, pgid.pool, pgid.seed, o,
1391              req->r_osd ? req->r_osd->o_osd : -1);
1392
1393         /* record full pg acting set */
1394         memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
1395         req->r_num_pg_osds = num;
1396
1397         if (req->r_osd) {
1398                 __cancel_request(req);
1399                 list_del_init(&req->r_osd_item);
1400                 list_del_init(&req->r_linger_osd_item);
1401                 req->r_osd = NULL;
1402         }
1403
1404         req->r_osd = __lookup_osd(osdc, o);
1405         if (!req->r_osd && o >= 0) {
1406                 err = -ENOMEM;
1407                 req->r_osd = create_osd(osdc, o);
1408                 if (!req->r_osd) {
1409                         list_move(&req->r_req_lru_item, &osdc->req_notarget);
1410                         goto out;
1411                 }
1412
1413                 dout("map_request osd %p is osd%d\n", req->r_osd, o);
1414                 __insert_osd(osdc, req->r_osd);
1415
1416                 ceph_con_open(&req->r_osd->o_con,
1417                               CEPH_ENTITY_TYPE_OSD, o,
1418                               &osdc->osdmap->osd_addr[o]);
1419         }
1420
1421         __enqueue_request(req);
1422         err = 1;   /* osd or pg changed */
1423
1424 out:
1425         return err;
1426 }
1427
1428 /*
1429  * caller should hold map_sem (for read) and request_mutex
1430  */
1431 static void __send_request(struct ceph_osd_client *osdc,
1432                            struct ceph_osd_request *req)
1433 {
1434         void *p;
1435
1436         dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
1437              req, req->r_tid, req->r_osd->o_osd, req->r_flags,
1438              (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);
1439
1440         /* fill in message content that changes each time we send it */
1441         put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
1442         put_unaligned_le32(req->r_flags, req->r_request_flags);
1443         put_unaligned_le64(req->r_target_oloc.pool, req->r_request_pool);
1444         p = req->r_request_pgid;
1445         ceph_encode_64(&p, req->r_pgid.pool);
1446         ceph_encode_32(&p, req->r_pgid.seed);
1447         put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
1448         memcpy(req->r_request_reassert_version, &req->r_reassert_version,
1449                sizeof(req->r_reassert_version));
1450
1451         req->r_stamp = jiffies;
1452         list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
1453
1454         ceph_msg_get(req->r_request); /* send consumes a ref */
1455
1456         req->r_sent = req->r_osd->o_incarnation;
1457
1458         ceph_con_send(&req->r_osd->o_con, req->r_request);
1459 }
1460
1461 /*
1462  * Send any requests in the queue (req_unsent).
1463  */
1464 static void __send_queued(struct ceph_osd_client *osdc)
1465 {
1466         struct ceph_osd_request *req, *tmp;
1467
1468         dout("__send_queued\n");
1469         list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
1470                 __send_request(osdc, req);
1471 }
1472
1473 /*
1474  * Caller should hold map_sem for read and request_mutex.
1475  */
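/*
 * Returns 0 if the request was registered and either queued for
 * sending or parked until a usable osdmap arrives; returns a negative
 * error only if mapping failed and nofail was false.
 */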
1476 static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
1477                                      struct ceph_osd_request *req,
1478                                      bool nofail)
1479 {
1480         int rc;
1481
1482         __register_request(osdc, req);
1483         req->r_sent = 0;
1484         req->r_got_reply = 0;
1485         rc = __map_request(osdc, req, 0);
1486         if (rc < 0) {
1487                 if (nofail) {
1488                         dout("osdc_start_request failed map, will retry %lld\n",
1489                                 req->r_tid);
1490                         rc = 0;
1491                 } else {
1492                         __unregister_request(osdc, req);
1493                 }
1494                 return rc;
1495         }
1496
1497         if (req->r_osd == NULL) {
1498                 dout("%s %p no up osds in pg\n", __func__, req);
1499                 ceph_monc_request_next_osdmap(&osdc->client->monc);
1500         } else {
1501                 __send_queued(osdc);
1502         }
1503
1504         return 0;
1505 }
1506
1507 /*
1508  * Timeout callback, called every N seconds when one or more osd
1509  * requests have been active for more than N seconds.  When this
1510  * happens, we ping all OSDs whose requests have timed out to
1511  * ensure any communications channel reset is detected.  Reset the
1512  * request timeouts another N seconds into the future as we go.
1513  * Reschedule the timeout event another N seconds in the future
1514  * (unless there are no open requests).
1515  */
1516 static void handle_timeout(struct work_struct *work)
1517 {
1518         struct ceph_osd_client *osdc =
1519                 container_of(work, struct ceph_osd_client, timeout_work.work);
1520         struct ceph_osd_request *req;
1521         struct ceph_osd *osd;
1522         unsigned long keepalive =
1523                 osdc->client->options->osd_keepalive_timeout * HZ;
1524         struct list_head slow_osds;
1525         dout("timeout\n");
1526         down_read(&osdc->map_sem);
1527
1528         ceph_monc_request_next_osdmap(&osdc->client->monc);
1529
1530         mutex_lock(&osdc->request_mutex);
1531
1532         /*
1533          * ping osds that are a bit slow.  this ensures that if there
1534          * is a break in the TCP connection we will notice, and reopen
1535          * a connection with that osd (from the fault callback).
1536          */
1537         INIT_LIST_HEAD(&slow_osds);
1538         list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
1539                 if (time_before(jiffies, req->r_stamp + keepalive))
1540                         break;
1541
1542                 osd = req->r_osd;
1543                 BUG_ON(!osd);
1544                 dout(" tid %llu is slow, will send keepalive on osd%d\n",
1545                      req->r_tid, osd->o_osd);
1546                 list_move_tail(&osd->o_keepalive_item, &slow_osds);
1547         }
1548         while (!list_empty(&slow_osds)) {
1549                 osd = list_entry(slow_osds.next, struct ceph_osd,
1550                                  o_keepalive_item);
1551                 list_del_init(&osd->o_keepalive_item);
1552                 ceph_con_keepalive(&osd->o_con);
1553         }
1554
1555         __schedule_osd_timeout(osdc);
1556         __send_queued(osdc);
1557         mutex_unlock(&osdc->request_mutex);
1558         up_read(&osdc->map_sem);
1559 }
1560
1561 static void handle_osds_timeout(struct work_struct *work)
1562 {
1563         struct ceph_osd_client *osdc =
1564                 container_of(work, struct ceph_osd_client,
1565                              osds_timeout_work.work);
1566         unsigned long delay =
1567                 osdc->client->options->osd_idle_ttl * HZ >> 2;  /* i.e. osd_idle_ttl / 4 */
1568
1569         dout("osds timeout\n");
1570         down_read(&osdc->map_sem);
1571         remove_old_osds(osdc);
1572         up_read(&osdc->map_sem);
1573
1574         schedule_delayed_work(&osdc->osds_timeout_work,
1575                               round_jiffies_relative(delay));
1576 }
1577
1578 static int ceph_oloc_decode(void **p, void *end,
1579                             struct ceph_object_locator *oloc)
1580 {
1581         u8 struct_v, struct_cv;
1582         u32 len;
1583         void *struct_end;
1584         int ret = 0;
1585
1586         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
1587         struct_v = ceph_decode_8(p);
1588         struct_cv = ceph_decode_8(p);
1589         if (struct_v < 3) {
1590                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
1591                         struct_v, struct_cv);
1592                 goto e_inval;
1593         }
1594         if (struct_cv > 6) {
1595                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
1596                         struct_v, struct_cv);
1597                 goto e_inval;
1598         }
1599         len = ceph_decode_32(p);
1600         ceph_decode_need(p, end, len, e_inval);
1601         struct_end = *p + len;
1602
1603         oloc->pool = ceph_decode_64(p);
1604         *p += 4; /* skip preferred */
1605
1606         len = ceph_decode_32(p);
1607         if (len > 0) {
1608                 pr_warn("ceph_object_locator::key is set\n");
1609                 goto e_inval;
1610         }
1611
1612         if (struct_v >= 5) {
1613                 len = ceph_decode_32(p);
1614                 if (len > 0) {
1615                         pr_warn("ceph_object_locator::nspace is set\n");
1616                         goto e_inval;
1617                 }
1618         }
1619
1620         if (struct_v >= 6) {
1621                 s64 hash = ceph_decode_64(p);
1622                 if (hash != -1) {
1623                         pr_warn("ceph_object_locator::hash is set\n");
1624                         goto e_inval;
1625                 }
1626         }
1627
1628         /* skip the rest */
1629         *p = struct_end;
1630 out:
1631         return ret;
1632
1633 e_inval:
1634         ret = -EINVAL;
1635         goto out;
1636 }
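/*
 * On-wire layout handled by ceph_oloc_decode() above:
 *
 *   u8  struct_v                 (must be >= 3)
 *   u8  struct_cv                (must be <= 6)
 *   u32 len                      (length of the payload that follows)
 *   u64 pool
 *   u32 preferred                (skipped)
 *   u32 key length + key         (must be empty)
 *   u32 nspace length + nspace   (v >= 5, must be empty)
 *   s64 hash                     (v >= 6, must be -1)
 *
 * Anything past what we understand is skipped via struct_end.
 */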
1637
1638 static int ceph_redirect_decode(void **p, void *end,
1639                                 struct ceph_request_redirect *redir)
1640 {
1641         u8 struct_v, struct_cv;
1642         u32 len;
1643         void *struct_end;
1644         int ret;
1645
1646         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
1647         struct_v = ceph_decode_8(p);
1648         struct_cv = ceph_decode_8(p);
1649         if (struct_cv > 1) {
1650                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
1651                         struct_v, struct_cv);
1652                 goto e_inval;
1653         }
1654         len = ceph_decode_32(p);
1655         ceph_decode_need(p, end, len, e_inval);
1656         struct_end = *p + len;
1657
1658         ret = ceph_oloc_decode(p, end, &redir->oloc);
1659         if (ret)
1660                 goto out;
1661
1662         len = ceph_decode_32(p);
1663         if (len > 0) {
1664                 pr_warn("ceph_request_redirect::object_name is set\n");
1665                 goto e_inval;
1666         }
1667
1668         len = ceph_decode_32(p);
1669         *p += len; /* skip osd_instructions */
1670
1671         /* skip the rest */
1672         *p = struct_end;
1673 out:
1674         return ret;
1675
1676 e_inval:
1677         ret = -EINVAL;
1678         goto out;
1679 }
1680
1681 static void complete_request(struct ceph_osd_request *req)
1682 {
1683         complete_all(&req->r_safe_completion);  /* fsync waiter */
1684 }
1685
1686 /*
1687  * handle osd op reply.  either call the callback if it is specified,
1688  * or do the completion to wake up the waiting thread.
1689  */
1690 static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1691                          struct ceph_connection *con)
1692 {
1693         void *p, *end;
1694         struct ceph_osd_request *req;
1695         struct ceph_request_redirect redir;
1696         u64 tid;
1697         int object_len;
1698         unsigned int numops;
1699         int payload_len, flags;
1700         s32 result;
1701         s32 retry_attempt;
1702         struct ceph_pg pg;
1703         int err;
1704         u32 reassert_epoch;
1705         u64 reassert_version;
1706         u32 osdmap_epoch;
1707         int already_completed;
1708         u32 bytes;
1709         unsigned int i;
1710
1711         tid = le64_to_cpu(msg->hdr.tid);
1712         dout("handle_reply %p tid %llu\n", msg, tid);
1713
1714         p = msg->front.iov_base;
1715         end = p + msg->front.iov_len;
1716
1717         ceph_decode_need(&p, end, 4, bad);
1718         object_len = ceph_decode_32(&p);
1719         ceph_decode_need(&p, end, object_len, bad);
1720         p += object_len;
1721
1722         err = ceph_decode_pgid(&p, end, &pg);
1723         if (err)
1724                 goto bad;
1725
1726         ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
1727         flags = ceph_decode_64(&p);
1728         result = ceph_decode_32(&p);
1729         reassert_epoch = ceph_decode_32(&p);
1730         reassert_version = ceph_decode_64(&p);
1731         osdmap_epoch = ceph_decode_32(&p);
1732
1733         /* lookup */
1734         down_read(&osdc->map_sem);
1735         mutex_lock(&osdc->request_mutex);
1736         req = __lookup_request(osdc, tid);
1737         if (req == NULL) {
1738                 dout("handle_reply tid %llu dne\n", tid);
1739                 goto bad_mutex;
1740         }
1741         ceph_osdc_get_request(req);
1742
1743         dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
1744              req, result);
1745
1746         ceph_decode_need(&p, end, 4, bad_put);
1747         numops = ceph_decode_32(&p);
1748         if (numops > CEPH_OSD_MAX_OP)
1749                 goto bad_put;
1750         if (numops != req->r_num_ops)
1751                 goto bad_put;
1752         payload_len = 0;
1753         ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad_put);
1754         for (i = 0; i < numops; i++) {
1755                 struct ceph_osd_op *op = p;
1756                 int len;
1757
1758                 len = le32_to_cpu(op->payload_len);
1759                 req->r_reply_op_len[i] = len;
1760                 dout(" op %d has %d bytes\n", i, len);
1761                 payload_len += len;
1762                 p += sizeof(*op);
1763         }
1764         bytes = le32_to_cpu(msg->hdr.data_len);
1765         if (payload_len != bytes) {
1766                 pr_warn("sum of op payload lens %d != data_len %d\n",
1767                         payload_len, bytes);
1768                 goto bad_put;
1769         }
1770
1771         ceph_decode_need(&p, end, 4 + numops * 4, bad_put);
1772         retry_attempt = ceph_decode_32(&p);
1773         for (i = 0; i < numops; i++)
1774                 req->r_reply_op_result[i] = ceph_decode_32(&p);
1775
1776         if (le16_to_cpu(msg->hdr.version) >= 6) {
1777                 p += 8 + 4; /* skip replay_version */
1778                 p += 8; /* skip user_version */
1779
1780                 err = ceph_redirect_decode(&p, end, &redir);
1781                 if (err)
1782                         goto bad_put;
1783         } else {
1784                 redir.oloc.pool = -1;
1785         }
1786
1787         if (redir.oloc.pool != -1) {
1788                 dout("redirect pool %lld\n", redir.oloc.pool);
1789
1790                 __unregister_request(osdc, req);
1791
1792                 req->r_target_oloc = redir.oloc; /* struct */
1793
1794                 /*
1795                  * Start redirect requests with nofail=true.  If
1796                  * mapping fails, the request will end up on the
1797                  * notarget list, waiting for a new osdmap (which can
1798                  * take a while), even though the original request
1799                  * mapped successfully.  In the future we might want
1800                  * to follow the original request's nofail setting.
1801                  */
1802                 err = __ceph_osdc_start_request(osdc, req, true);
1803                 BUG_ON(err);
1804
1805                 goto out_unlock;
1806         }
1807
1808         already_completed = req->r_got_reply;
1809         if (!req->r_got_reply) {
1810                 req->r_result = result;
1811                 dout("handle_reply result %d bytes %d\n", req->r_result,
1812                      bytes);
1813                 if (req->r_result == 0)
1814                         req->r_result = bytes;
1815
1816                 /* record reassert version in case this is a write we must replay */
1817                 req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
1818                 req->r_reassert_version.version = cpu_to_le64(reassert_version);
1819
1820                 req->r_got_reply = 1;
1821         } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
1822                 dout("handle_reply tid %llu dup ack\n", tid);
1823                 goto out_unlock;
1824         }
1825
1826         dout("handle_reply tid %llu flags %d\n", tid, flags);
1827
1828         if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
1829                 __register_linger_request(osdc, req);
1830
1831         /* either this is a read, or we got the safe response */
1832         if (result < 0 ||
1833             (flags & CEPH_OSD_FLAG_ONDISK) ||
1834             ((flags & CEPH_OSD_FLAG_WRITE) == 0))
1835                 __unregister_request(osdc, req);
1836
1837         mutex_unlock(&osdc->request_mutex);
1838         up_read(&osdc->map_sem);
1839
1840         if (!already_completed) {
1841                 if (req->r_unsafe_callback &&
1842                     result >= 0 && !(flags & CEPH_OSD_FLAG_ONDISK))
1843                         req->r_unsafe_callback(req, true);
1844                 if (req->r_callback)
1845                         req->r_callback(req, msg);
1846                 else
1847                         complete_all(&req->r_completion);
1848         }
1849
1850         if (flags & CEPH_OSD_FLAG_ONDISK) {
1851                 if (req->r_unsafe_callback && already_completed)
1852                         req->r_unsafe_callback(req, false);
1853                 complete_request(req);
1854         }
1855
1856 out:
1857         dout("req=%p req->r_linger=%d\n", req, req->r_linger);
1858         ceph_osdc_put_request(req);
1859         return;
1860 out_unlock:
1861         mutex_unlock(&osdc->request_mutex);
1862         up_read(&osdc->map_sem);
1863         goto out;
1864
1865 bad_put:
1866         req->r_result = -EIO;
1867         __unregister_request(osdc, req);
1868         if (req->r_callback)
1869                 req->r_callback(req, msg);
1870         else
1871                 complete_all(&req->r_completion);
1872         complete_request(req);
1873         ceph_osdc_put_request(req);
1874 bad_mutex:
1875         mutex_unlock(&osdc->request_mutex);
1876         up_read(&osdc->map_sem);
1877 bad:
1878         pr_err("corrupt osd_op_reply got %d %d\n",
1879                (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
1880         ceph_msg_dump(msg);
1881 }
1882
1883 static void reset_changed_osds(struct ceph_osd_client *osdc)
1884 {
1885         struct rb_node *p, *n;
1886
1887         for (p = rb_first(&osdc->osds); p; p = n) {
1888                 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
1889
1890                 n = rb_next(p);
1891                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
1892                     memcmp(&osd->o_con.peer_addr,
1893                            ceph_osd_addr(osdc->osdmap,
1894                                          osd->o_osd),
1895                            sizeof(struct ceph_entity_addr)) != 0)
1896                         __reset_osd(osdc, osd);
1897         }
1898 }
1899
1900 /*
1901  * Requeue requests whose mapping to an OSD has changed.  If requests map to
1902  * no osd, request a new map.
1903  *
1904  * Caller should hold map_sem for read.
1905  */
1906 static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
1907                           bool force_resend_writes)
1908 {
1909         struct ceph_osd_request *req, *nreq;
1910         struct rb_node *p;
1911         int needmap = 0;
1912         int err;
1913         bool force_resend_req;
1914
1915         dout("kick_requests%s%s\n", force_resend ? " (force resend)" : "",
1916                 force_resend_writes ? " (force resend writes)" : "");
1917         mutex_lock(&osdc->request_mutex);
1918         for (p = rb_first(&osdc->requests); p; ) {
1919                 req = rb_entry(p, struct ceph_osd_request, r_node);
1920                 p = rb_next(p);
1921
1922                 /*
1923                  * For linger requests that have not yet been
1924                  * registered, move them to the linger list; they'll
1925                  * be sent to the osd in the loop below.  Unregister
1926                  * the request before re-registering it as a linger
1927                  * request to ensure the __map_request() below
1928                  * will decide it needs to be sent.
1929                  */
1930                 if (req->r_linger && list_empty(&req->r_linger_item)) {
1931                         dout("%p tid %llu restart on osd%d\n",
1932                              req, req->r_tid,
1933                              req->r_osd ? req->r_osd->o_osd : -1);
1934                         ceph_osdc_get_request(req);
1935                         __unregister_request(osdc, req);
1936                         __register_linger_request(osdc, req);
1937                         ceph_osdc_put_request(req);
1938                         continue;
1939                 }
1940
1941                 force_resend_req = force_resend ||
1942                         (force_resend_writes &&
1943                                 req->r_flags & CEPH_OSD_FLAG_WRITE);
1944                 err = __map_request(osdc, req, force_resend_req);
1945                 if (err < 0)
1946                         continue;  /* error */
1947                 if (req->r_osd == NULL) {
1948                         dout("%p tid %llu maps to no osd\n", req, req->r_tid);
1949                         needmap++;  /* request a newer map */
1950                 } else if (err > 0) {
1951                         if (!req->r_linger) {
1952                                 dout("%p tid %llu requeued on osd%d\n", req,
1953                                      req->r_tid,
1954                                      req->r_osd ? req->r_osd->o_osd : -1);
1955                                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1956                         }
1957                 }
1958         }
1959
1960         list_for_each_entry_safe(req, nreq, &osdc->req_linger,
1961                                  r_linger_item) {
1962                 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
1963
1964                 err = __map_request(osdc, req,
1965                                     force_resend || force_resend_writes);
1966                 dout("__map_request returned %d\n", err);
1967                 if (err == 0)
1968                         continue;  /* no change and no osd was specified */
1969                 if (err < 0)
1970                         continue;  /* mapping failed; retry on next map */
1971                 if (req->r_osd == NULL) {
1972                         dout("tid %llu maps to no valid osd\n", req->r_tid);
1973                         needmap++;  /* request a newer map */
1974                         continue;
1975                 }
1976
1977                 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
1978                      req->r_osd ? req->r_osd->o_osd : -1);
1979                 __register_request(osdc, req);
1980                 __unregister_linger_request(osdc, req);
1981         }
1982         reset_changed_osds(osdc);
1983         mutex_unlock(&osdc->request_mutex);
1984
1985         if (needmap) {
1986                 dout("%d requests for down osds, need new map\n", needmap);
1987                 ceph_monc_request_next_osdmap(&osdc->client->monc);
1988         }
1989 }
1990
1991
1992 /*
1993  * Process updated osd map.
1994  *
1995  * The message contains any number of incremental and full maps, normally
1996  * indicating some sort of topology change in the cluster.  Kick requests
1997  * off to different OSDs as needed.
1998  */
1999 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
2000 {
2001         void *p, *end, *next;
2002         u32 nr_maps, maplen;
2003         u32 epoch;
2004         struct ceph_osdmap *newmap = NULL, *oldmap;
2005         int err;
2006         struct ceph_fsid fsid;
2007         bool was_full;
2008
2009         dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
2010         p = msg->front.iov_base;
2011         end = p + msg->front.iov_len;
2012
2013         /* verify fsid */
2014         ceph_decode_need(&p, end, sizeof(fsid), bad);
2015         ceph_decode_copy(&p, &fsid, sizeof(fsid));
2016         if (ceph_check_fsid(osdc->client, &fsid) < 0)
2017                 return;
2018
2019         down_write(&osdc->map_sem);
2020
2021         was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
2022
2023         /* incremental maps */
2024         ceph_decode_32_safe(&p, end, nr_maps, bad);
2025         dout(" %d inc maps\n", nr_maps);
2026         while (nr_maps > 0) {
2027                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2028                 epoch = ceph_decode_32(&p);
2029                 maplen = ceph_decode_32(&p);
2030                 ceph_decode_need(&p, end, maplen, bad);
2031                 next = p + maplen;
2032                 if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
2033                         dout("applying incremental map %u len %d\n",
2034                              epoch, maplen);
2035                         newmap = osdmap_apply_incremental(&p, next,
2036                                                           osdc->osdmap,
2037                                                           &osdc->client->msgr);
2038                         if (IS_ERR(newmap)) {
2039                                 err = PTR_ERR(newmap);
2040                                 goto bad;
2041                         }
2042                         BUG_ON(!newmap);
2043                         if (newmap != osdc->osdmap) {
2044                                 ceph_osdmap_destroy(osdc->osdmap);
2045                                 osdc->osdmap = newmap;
2046                         }
2047                         was_full = was_full ||
2048                                 ceph_osdmap_flag(osdc->osdmap,
2049                                                  CEPH_OSDMAP_FULL);
2050                         kick_requests(osdc, 0, was_full);
2051                 } else {
2052                         dout("ignoring incremental map %u len %d\n",
2053                              epoch, maplen);
2054                 }
2055                 p = next;
2056                 nr_maps--;
2057         }
2058         if (newmap)
2059                 goto done;
2060
2061         /* full maps */
2062         ceph_decode_32_safe(&p, end, nr_maps, bad);
2063         dout(" %d full maps\n", nr_maps);
2064         while (nr_maps) {
2065                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2066                 epoch = ceph_decode_32(&p);
2067                 maplen = ceph_decode_32(&p);
2068                 ceph_decode_need(&p, end, maplen, bad);
2069                 if (nr_maps > 1) {
2070                         dout("skipping non-latest full map %u len %d\n",
2071                              epoch, maplen);
2072                 } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
2073                         dout("skipping full map %u len %d, "
2074                              "older than our %u\n", epoch, maplen,
2075                              osdc->osdmap->epoch);
2076                 } else {
2077                         int skipped_map = 0;
2078
2079                         dout("taking full map %u len %d\n", epoch, maplen);
2080                         newmap = ceph_osdmap_decode(&p, p+maplen);
2081                         if (IS_ERR(newmap)) {
2082                                 err = PTR_ERR(newmap);
2083                                 goto bad;
2084                         }
2085                         BUG_ON(!newmap);
2086                         oldmap = osdc->osdmap;
2087                         osdc->osdmap = newmap;
2088                         if (oldmap) {
2089                                 if (oldmap->epoch + 1 < newmap->epoch)
2090                                         skipped_map = 1;
2091                                 ceph_osdmap_destroy(oldmap);
2092                         }
2093                         was_full = was_full ||
2094                                 ceph_osdmap_flag(osdc->osdmap,
2095                                                  CEPH_OSDMAP_FULL);
2096                         kick_requests(osdc, skipped_map, was_full);
2097                 }
2098                 p += maplen;
2099                 nr_maps--;
2100         }
2101
2102         if (!osdc->osdmap)
2103                 goto bad;
2104 done:
2105         downgrade_write(&osdc->map_sem);
2106         ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
2107
2108         /*
2109          * Subscribe to subsequent osdmap updates if the map is full or
2110          * paused, so we find out when we are no longer full (or paused)
2111          * and can stop returning ENOSPC or blocking I/O.
2112          */
2113         if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
2114                 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
2115                 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR))
2116                 ceph_monc_request_next_osdmap(&osdc->client->monc);
2117
2118         mutex_lock(&osdc->request_mutex);
2119         __send_queued(osdc);
2120         mutex_unlock(&osdc->request_mutex);
2121         up_read(&osdc->map_sem);
2122         wake_up_all(&osdc->client->auth_wq);
2123         return;
2124
2125 bad:
2126         pr_err("osdc handle_map corrupt msg\n");
2127         ceph_msg_dump(msg);
2128         up_write(&osdc->map_sem);
2129 }
2130
2131 /*
2132  * watch/notify callback event infrastructure
2133  *
2134  * These callbacks are used both for watch and notify operations.
2135  */
2136 static void __release_event(struct kref *kref)
2137 {
2138         struct ceph_osd_event *event =
2139                 container_of(kref, struct ceph_osd_event, kref);
2140
2141         dout("__release_event %p\n", event);
2142         kfree(event);
2143 }
2144
2145 static void get_event(struct ceph_osd_event *event)
2146 {
2147         kref_get(&event->kref);
2148 }
2149
2150 void ceph_osdc_put_event(struct ceph_osd_event *event)
2151 {
2152         kref_put(&event->kref, __release_event);
2153 }
2154 EXPORT_SYMBOL(ceph_osdc_put_event);
2155
2156 static void __insert_event(struct ceph_osd_client *osdc,
2157                              struct ceph_osd_event *new)
2158 {
2159         struct rb_node **p = &osdc->event_tree.rb_node;
2160         struct rb_node *parent = NULL;
2161         struct ceph_osd_event *event = NULL;
2162
2163         while (*p) {
2164                 parent = *p;
2165                 event = rb_entry(parent, struct ceph_osd_event, node);
2166                 if (new->cookie < event->cookie)
2167                         p = &(*p)->rb_left;
2168                 else if (new->cookie > event->cookie)
2169                         p = &(*p)->rb_right;
2170                 else
2171                         BUG();
2172         }
2173
2174         rb_link_node(&new->node, parent, p);
2175         rb_insert_color(&new->node, &osdc->event_tree);
2176 }
2177
2178 static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
2179                                                 u64 cookie)
2180 {
2181         struct rb_node **p = &osdc->event_tree.rb_node;
2182         struct rb_node *parent = NULL;
2183         struct ceph_osd_event *event = NULL;
2184
2185         while (*p) {
2186                 parent = *p;
2187                 event = rb_entry(parent, struct ceph_osd_event, node);
2188                 if (cookie < event->cookie)
2189                         p = &(*p)->rb_left;
2190                 else if (cookie > event->cookie)
2191                         p = &(*p)->rb_right;
2192                 else
2193                         return event;
2194         }
2195         return NULL;
2196 }
2197
2198 static void __remove_event(struct ceph_osd_event *event)
2199 {
2200         struct ceph_osd_client *osdc = event->osdc;
2201
2202         if (!RB_EMPTY_NODE(&event->node)) {
2203                 dout("__remove_event removed %p\n", event);
2204                 rb_erase(&event->node, &osdc->event_tree);
2205                 ceph_osdc_put_event(event);
2206         } else {
2207                 dout("__remove_event didn't remove %p\n", event);
2208         }
2209 }
2210
2211 int ceph_osdc_create_event(struct ceph_osd_client *osdc,
2212                            void (*event_cb)(u64, u64, u8, void *),
2213                            void *data, struct ceph_osd_event **pevent)
2214 {
2215         struct ceph_osd_event *event;
2216
2217         event = kmalloc(sizeof(*event), GFP_NOIO);
2218         if (!event)
2219                 return -ENOMEM;
2220
2221         dout("create_event %p\n", event);
2222         event->cb = event_cb;
2223         event->one_shot = 0;
2224         event->data = data;
2225         event->osdc = osdc;
2226         INIT_LIST_HEAD(&event->osd_node);
2227         RB_CLEAR_NODE(&event->node);
2228         kref_init(&event->kref);   /* one ref for us */
2229         kref_get(&event->kref);    /* one ref for the caller */
2230
2231         spin_lock(&osdc->event_lock);
2232         event->cookie = ++osdc->event_count;
2233         __insert_event(osdc, event);
2234         spin_unlock(&osdc->event_lock);
2235
2236         *pevent = event;
2237         return 0;
2238 }
2239 EXPORT_SYMBOL(ceph_osdc_create_event);
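/*
 * Illustrative sketch (kept under #if 0, never compiled): a typical
 * watch/notify user creates an event, hands event->cookie to the osd
 * in a watch op, and cancels the event to stop further callbacks.
 * The function names and the empty callback body are assumptions for
 * illustration only, not part of this file's logic.
 */
#if 0
static void example_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	/* invoked from osdc->notify_wq via do_event_work() */
}

static int example_event_usage(struct ceph_osd_client *osdc)
{
	struct ceph_osd_event *event;
	int ret;

	ret = ceph_osdc_create_event(osdc, example_notify_cb, NULL, &event);
	if (ret)
		return ret;	/* -ENOMEM */

	/* ... pass event->cookie to a watch osd op here ... */

	ceph_osdc_cancel_event(event);	/* drops the caller's reference */
	return 0;
}
#endif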
2240
2241 void ceph_osdc_cancel_event(struct ceph_osd_event *event)
2242 {
2243         struct ceph_osd_client *osdc = event->osdc;
2244
2245         dout("cancel_event %p\n", event);
2246         spin_lock(&osdc->event_lock);
2247         __remove_event(event);
2248         spin_unlock(&osdc->event_lock);
2249         ceph_osdc_put_event(event); /* caller's */
2250 }
2251 EXPORT_SYMBOL(ceph_osdc_cancel_event);
2252
2253
2254 static void do_event_work(struct work_struct *work)
2255 {
2256         struct ceph_osd_event_work *event_work =
2257                 container_of(work, struct ceph_osd_event_work, work);
2258         struct ceph_osd_event *event = event_work->event;
2259         u64 ver = event_work->ver;
2260         u64 notify_id = event_work->notify_id;
2261         u8 opcode = event_work->opcode;
2262
2263         dout("do_event_work completing %p\n", event);
2264         event->cb(ver, notify_id, opcode, event->data);
2265         dout("do_event_work completed %p\n", event);
2266         ceph_osdc_put_event(event);
2267         kfree(event_work);
2268 }
2269
2270
2271 /*
2272  * Process osd watch notifications
2273  */
2274 static void handle_watch_notify(struct ceph_osd_client *osdc,
2275                                 struct ceph_msg *msg)
2276 {
2277         void *p, *end;
2278         u8 proto_ver;
2279         u64 cookie, ver, notify_id;
2280         u8 opcode;
2281         struct ceph_osd_event *event;
2282         struct ceph_osd_event_work *event_work;
2283
2284         p = msg->front.iov_base;
2285         end = p + msg->front.iov_len;
2286
2287         ceph_decode_8_safe(&p, end, proto_ver, bad);
2288         ceph_decode_8_safe(&p, end, opcode, bad);
2289         ceph_decode_64_safe(&p, end, cookie, bad);
2290         ceph_decode_64_safe(&p, end, ver, bad);
2291         ceph_decode_64_safe(&p, end, notify_id, bad);
2292
2293         spin_lock(&osdc->event_lock);
2294         event = __find_event(osdc, cookie);
2295         if (event) {
2296                 BUG_ON(event->one_shot);
2297                 get_event(event);
2298         }
2299         spin_unlock(&osdc->event_lock);
2300         dout("handle_watch_notify cookie %lld ver %lld event %p\n",
2301              cookie, ver, event);
2302         if (event) {
2303                 event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
2304                 if (!event_work) {
2305                         pr_err("couldn't allocate event_work\n");
2306                         ceph_osdc_put_event(event);
2307                         return;
2308                 }
2309                 INIT_WORK(&event_work->work, do_event_work);
2310                 event_work->event = event;
2311                 event_work->ver = ver;
2312                 event_work->notify_id = notify_id;
2313                 event_work->opcode = opcode;
2314
2315                 queue_work(osdc->notify_wq, &event_work->work);
2316         }
2317
2318         return;
2319
2320 bad:
2321         pr_err("osdc handle_watch_notify corrupt msg\n");
2322 }
2323
2324 /*
2325  * Build a new request AND its message.
2326  */
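/*
 * The MOSDOp front (header version 4) is encoded in order: client_inc,
 * osdmap epoch, flags, mtime, reassert_version, object locator (pool,
 * preferred, key length), pgid, object name, the op array, snapid,
 * snap context and the attempt count.  Fields only known at send time
 * are recorded as pointers into the message (req->r_request_*) and
 * filled in by __send_request().
 */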
2328 void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
2329                                 struct ceph_snap_context *snapc, u64 snap_id,
2330                                 struct timespec *mtime)
2331 {
2332         struct ceph_msg *msg = req->r_request;
2333         void *p;
2334         size_t msg_size;
2335         int flags = req->r_flags;
2336         u64 data_len;
2337         unsigned int i;
2338
2339         req->r_snapid = snap_id;
2340         req->r_snapc = ceph_get_snap_context(snapc);
2341
2342         /* encode request */
2343         msg->hdr.version = cpu_to_le16(4);
2344
2345         p = msg->front.iov_base;
2346         ceph_encode_32(&p, 1);   /* client_inc is always 1 */
2347         req->r_request_osdmap_epoch = p;
2348         p += 4;
2349         req->r_request_flags = p;
2350         p += 4;
2351         if (req->r_flags & CEPH_OSD_FLAG_WRITE)
2352                 ceph_encode_timespec(p, mtime);
2353         p += sizeof(struct ceph_timespec);
2354         req->r_request_reassert_version = p;
2355         p += sizeof(struct ceph_eversion); /* will get filled in */
2356
2357         /* oloc */
2358         ceph_encode_8(&p, 4);           /* struct_v */
2359         ceph_encode_8(&p, 4);           /* struct compat version */
2360         ceph_encode_32(&p, 8 + 4 + 4);  /* pool + preferred + key len */
2361         req->r_request_pool = p;
2362         p += 8;                         /* pool, filled in at send time */
2363         ceph_encode_32(&p, -1);  /* preferred */
2364         ceph_encode_32(&p, 0);   /* key len */
2365
2366         ceph_encode_8(&p, 1);           /* pgid encoding version */
2367         req->r_request_pgid = p;
2368         p += 8 + 4;                     /* pool + seed, filled in at send time */
2369         ceph_encode_32(&p, -1);  /* preferred */
2370
2371         /* oid */
2372         ceph_encode_32(&p, req->r_base_oid.name_len);
2373         memcpy(p, req->r_base_oid.name, req->r_base_oid.name_len);
2374         dout("oid '%.*s' len %d\n", req->r_base_oid.name_len,
2375              req->r_base_oid.name, req->r_base_oid.name_len);
2376         p += req->r_base_oid.name_len;
2377
2378         /* ops--can imply data */
2379         ceph_encode_16(&p, (u16)req->r_num_ops);
2380         data_len = 0;
2381         for (i = 0; i < req->r_num_ops; i++) {
2382                 data_len += osd_req_encode_op(req, p, i);
2383                 p += sizeof(struct ceph_osd_op);
2384         }
2385
2386         /* snaps */
2387         ceph_encode_64(&p, req->r_snapid);
2388         ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
2389         ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
2390         if (req->r_snapc) {
2391                 for (i = 0; i < req->r_snapc->num_snaps; i++) {
2392                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
2393                 }
2394         }
2395
2396         req->r_request_attempts = p;
2397         p += 4;
2398
2399         /* data */
2400         if (flags & CEPH_OSD_FLAG_WRITE) {
2401                 u16 data_off;
2402
2403                 /*
2404                  * The header "data_off" is a hint to the receiver
2405                  * allowing it to align received data into its
2406                  * buffers such that there's no need to re-copy
2407                  * it before writing it to disk (direct I/O).
2408                  */
2409                 data_off = (u16) (off & 0xffff);
2410                 req->r_request->hdr.data_off = cpu_to_le16(data_off);
2411         }
2412         req->r_request->hdr.data_len = cpu_to_le32(data_len);
2413
2414         BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
2415         msg_size = p - msg->front.iov_base;
2416         msg->front.iov_len = msg_size;
2417         msg->hdr.front_len = cpu_to_le32(msg_size);
2418
2419         dout("build_request msg_size was %d\n", (int)msg_size);
2420 }
2421 EXPORT_SYMBOL(ceph_osdc_build_request);
2422
2423 /*
2424  * Register request, send initial attempt.
2425  */
2426 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
2427                             struct ceph_osd_request *req,
2428                             bool nofail)
2429 {
2430         int rc;
2431
2432         down_read(&osdc->map_sem);
2433         mutex_lock(&osdc->request_mutex);
2434
2435         rc = __ceph_osdc_start_request(osdc, req, nofail);
2436
2437         mutex_unlock(&osdc->request_mutex);
2438         up_read(&osdc->map_sem);
2439
2440         return rc;
2441 }
2442 EXPORT_SYMBOL(ceph_osdc_start_request);
2443
2444 /*
2445  * Unregister a registered request.  The request is not completed (i.e.
2446  * no callbacks or wakeups) - higher layers are supposed to know what
2447  * they are canceling.
2448  */
2449 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
2450 {
2451         struct ceph_osd_client *osdc = req->r_osdc;
2452
2453         mutex_lock(&osdc->request_mutex);
2454         if (req->r_linger)
2455                 __unregister_linger_request(osdc, req);
2456         __unregister_request(osdc, req);
2457         mutex_unlock(&osdc->request_mutex);
2458
2459         dout("%s %p tid %llu canceled\n", __func__, req, req->r_tid);
2460 }
2461 EXPORT_SYMBOL(ceph_osdc_cancel_request);
2462
2463 /*
2464  * wait for a request to complete
2465  */
2466 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
2467                            struct ceph_osd_request *req)
2468 {
2469         int rc;
2470
2471         dout("%s %p tid %llu\n", __func__, req, req->r_tid);
2472
2473         rc = wait_for_completion_interruptible(&req->r_completion);
2474         if (rc < 0) {
2475                 dout("%s %p tid %llu interrupted\n", __func__, req, req->r_tid);
2476                 ceph_osdc_cancel_request(req);
2477                 complete_request(req);
2478                 return rc;
2479         }
2480
2481         dout("%s %p tid %llu result %d\n", __func__, req, req->r_tid,
2482              req->r_result);
2483         return req->r_result;
2484 }
2485 EXPORT_SYMBOL(ceph_osdc_wait_request);
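/*
 * Illustrative sketch (kept under #if 0, never compiled): the
 * canonical synchronous submit pattern is start + wait + put, exactly
 * as ceph_osdc_readpages() below does.  The function name is
 * hypothetical.
 */
#if 0
static int example_sync_submit(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	int ret;

	ret = ceph_osdc_start_request(osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);	/* drop the submitter's reference */
	return ret;
}
#endif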
2486
2487 /*
2488  * sync - wait for all in-flight write requests to flush; cap at last_tid so new requests cannot starve us.
2489  */
2490 void ceph_osdc_sync(struct ceph_osd_client *osdc)
2491 {
2492         struct ceph_osd_request *req;
2493         u64 last_tid, next_tid = 0;
2494
2495         mutex_lock(&osdc->request_mutex);
2496         last_tid = osdc->last_tid;
2497         while (1) {
2498                 req = __lookup_request_ge(osdc, next_tid);
2499                 if (!req)
2500                         break;
2501                 if (req->r_tid > last_tid)
2502                         break;
2503
2504                 next_tid = req->r_tid + 1;
2505                 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
2506                         continue;
2507
2508                 ceph_osdc_get_request(req);
2509                 mutex_unlock(&osdc->request_mutex);
2510                 dout("sync waiting on tid %llu (last is %llu)\n",
2511                      req->r_tid, last_tid);
2512                 wait_for_completion(&req->r_safe_completion);
2513                 mutex_lock(&osdc->request_mutex);
2514                 ceph_osdc_put_request(req);
2515         }
2516         mutex_unlock(&osdc->request_mutex);
2517         dout("sync done (thru tid %llu)\n", last_tid);
2518 }
2519 EXPORT_SYMBOL(ceph_osdc_sync);
2520
2521 /*
2522  * Call all pending notify callbacks - for use after a watch is
2523  * unregistered, to make sure no more callbacks for it will be invoked
2524  */
2525 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
2526 {
2527         flush_workqueue(osdc->notify_wq);
2528 }
2529 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
2530
2531
2532 /*
2533  * init, shutdown
2534  */
2535 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
2536 {
2537         int err;
2538
2539         dout("init\n");
2540         osdc->client = client;
2541         osdc->osdmap = NULL;
2542         init_rwsem(&osdc->map_sem);
2543         init_completion(&osdc->map_waiters);
2544         osdc->last_requested_map = 0;
2545         mutex_init(&osdc->request_mutex);
2546         osdc->last_tid = 0;
2547         osdc->osds = RB_ROOT;
2548         INIT_LIST_HEAD(&osdc->osd_lru);
2549         osdc->requests = RB_ROOT;
2550         INIT_LIST_HEAD(&osdc->req_lru);
2551         INIT_LIST_HEAD(&osdc->req_unsent);
2552         INIT_LIST_HEAD(&osdc->req_notarget);
2553         INIT_LIST_HEAD(&osdc->req_linger);
2554         osdc->num_requests = 0;
2555         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
2556         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
2557         spin_lock_init(&osdc->event_lock);
2558         osdc->event_tree = RB_ROOT;
2559         osdc->event_count = 0;
2560
2561         schedule_delayed_work(&osdc->osds_timeout_work,
2562            round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
2563
2564         err = -ENOMEM;
2565         osdc->req_mempool = mempool_create_kmalloc_pool(10,
2566                                         sizeof(struct ceph_osd_request));
2567         if (!osdc->req_mempool)
2568                 goto out;
2569
2570         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
2571                                 OSD_OP_FRONT_LEN, 10, true,
2572                                 "osd_op");
2573         if (err < 0)
2574                 goto out_mempool;
2575         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
2576                                 OSD_OPREPLY_FRONT_LEN, 10, true,
2577                                 "osd_op_reply");
2578         if (err < 0)
2579                 goto out_msgpool;
2580
2581         err = -ENOMEM;
2582         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
2583         if (!osdc->notify_wq)
2584                 goto out_msgpool_reply;
2585
2586         return 0;
2587
2588 out_msgpool_reply:
2589         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
2590 out_msgpool:
2591         ceph_msgpool_destroy(&osdc->msgpool_op);
2592 out_mempool:
2593         mempool_destroy(osdc->req_mempool);
2594 out:
2595         return err;
2596 }
2597
2598 void ceph_osdc_stop(struct ceph_osd_client *osdc)
2599 {
2600         flush_workqueue(osdc->notify_wq);
2601         destroy_workqueue(osdc->notify_wq);
2602         cancel_delayed_work_sync(&osdc->timeout_work);
2603         cancel_delayed_work_sync(&osdc->osds_timeout_work);
2604         if (osdc->osdmap) {
2605                 ceph_osdmap_destroy(osdc->osdmap);
2606                 osdc->osdmap = NULL;
2607         }
2608         remove_all_osds(osdc);
2609         mempool_destroy(osdc->req_mempool);
2610         ceph_msgpool_destroy(&osdc->msgpool_op);
2611         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
2612 }
2613
2614 /*
2615  * Read some contiguous pages.  If we cross a stripe boundary, shorten
2616  * *plen.  Return number of bytes read, or error.
2617  */
2618 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
2619                         struct ceph_vino vino, struct ceph_file_layout *layout,
2620                         u64 off, u64 *plen,
2621                         u32 truncate_seq, u64 truncate_size,
2622                         struct page **pages, int num_pages, int page_align)
2623 {
2624         struct ceph_osd_request *req;
2625         int rc = 0;
2626
2627         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
2628              vino.snap, off, *plen);
2629         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
2630                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
2631                                     NULL, truncate_seq, truncate_size,
2632                                     false);
2633         if (IS_ERR(req))
2634                 return PTR_ERR(req);
2635
2636         /* it may be a short read due to an object boundary */
2637
2638         osd_req_op_extent_osd_data_pages(req, 0,
2639                                 pages, *plen, page_align, false, false);
2640
2641         dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
2642              off, *plen, *plen, page_align);
2643
2644         ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
2645
2646         rc = ceph_osdc_start_request(osdc, req, false);
2647         if (!rc)
2648                 rc = ceph_osdc_wait_request(osdc, req);
2649
2650         ceph_osdc_put_request(req);
2651         dout("readpages result %d\n", rc);
2652         return rc;
2653 }
2654 EXPORT_SYMBOL(ceph_osdc_readpages);
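/*
 * Illustrative sketch (kept under #if 0, never compiled): read the
 * first PAGE_SIZE bytes of a file into one page.  The caller-supplied
 * vino, layout and page, and the zero truncate_seq/truncate_size, are
 * assumptions for illustration.
 */
#if 0
static int example_read_one_page(struct ceph_osd_client *osdc,
				 struct ceph_vino vino,
				 struct ceph_file_layout *layout,
				 struct page *page)
{
	u64 len = PAGE_SIZE;	/* may be shortened at an object boundary */

	return ceph_osdc_readpages(osdc, vino, layout, 0, &len,
				   0, 0, &page, 1, 0);
}
#endif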
2655
2656 /*
2657  * do a synchronous write on N pages
2658  */
2659 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
2660                          struct ceph_file_layout *layout,
2661                          struct ceph_snap_context *snapc,
2662                          u64 off, u64 len,
2663                          u32 truncate_seq, u64 truncate_size,
2664                          struct timespec *mtime,
2665                          struct page **pages, int num_pages)
2666 {
2667         struct ceph_osd_request *req;
2668         int rc = 0;
2669         int page_align = off & ~PAGE_MASK;
2670
2671         BUG_ON(vino.snap != CEPH_NOSNAP);       /* snapshots aren't writeable */
2672         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
2673                                     CEPH_OSD_OP_WRITE,
2674                                     CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
2675                                     snapc, truncate_seq, truncate_size,
2676                                     true);
2677         if (IS_ERR(req))
2678                 return PTR_ERR(req);
2679
2680         /* it may be a short write due to an object boundary */
2681         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
2682                                 false, false);
2683         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
2684
2685         ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime);
2686
2687         rc = ceph_osdc_start_request(osdc, req, true);
2688         if (!rc)
2689                 rc = ceph_osdc_wait_request(osdc, req);
2690
2691         ceph_osdc_put_request(req);
2692         if (rc == 0)
2693                 rc = len;
2694         dout("writepages result %d\n", rc);
2695         return rc;
2696 }
2697 EXPORT_SYMBOL(ceph_osdc_writepages);
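/*
 * Illustrative sketch (kept under #if 0, never compiled): synchronously
 * write one page at offset 0.  Using CURRENT_TIME for mtime and a
 * caller-supplied snap context are assumptions for illustration.
 */
#if 0
static int example_write_one_page(struct ceph_osd_client *osdc,
				  struct ceph_vino vino,
				  struct ceph_file_layout *layout,
				  struct ceph_snap_context *snapc,
				  struct page *page)
{
	struct timespec mtime = CURRENT_TIME;

	return ceph_osdc_writepages(osdc, vino, layout, snapc,
				    0, PAGE_SIZE, 0, 0, &mtime, &page, 1);
}
#endif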
2698
2699 int ceph_osdc_setup(void)
2700 {
2701         BUG_ON(ceph_osd_request_cache);
2702         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request",
2703                                         sizeof (struct ceph_osd_request),
2704                                         __alignof__(struct ceph_osd_request),
2705                                         0, NULL);
2706
2707         return ceph_osd_request_cache ? 0 : -ENOMEM;
2708 }
2709 EXPORT_SYMBOL(ceph_osdc_setup);
2710
2711 void ceph_osdc_cleanup(void)
2712 {
2713         BUG_ON(!ceph_osd_request_cache);
2714         kmem_cache_destroy(ceph_osd_request_cache);
2715         ceph_osd_request_cache = NULL;
2716 }
2717 EXPORT_SYMBOL(ceph_osdc_cleanup);
2718
2719 /*
2720  * handle incoming message
2721  */
2722 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
2723 {
2724         struct ceph_osd *osd = con->private;
2725         struct ceph_osd_client *osdc;
2726         int type = le16_to_cpu(msg->hdr.type);
2727
2728         if (!osd)
2729                 goto out;
2730         osdc = osd->o_osdc;
2731
2732         switch (type) {
2733         case CEPH_MSG_OSD_MAP:
2734                 ceph_osdc_handle_map(osdc, msg);
2735                 break;
2736         case CEPH_MSG_OSD_OPREPLY:
2737                 handle_reply(osdc, msg, con);
2738                 break;
2739         case CEPH_MSG_WATCH_NOTIFY:
2740                 handle_watch_notify(osdc, msg);
2741                 break;
2742
2743         default:
2744                 pr_err("received unknown message type %d %s\n", type,
2745                        ceph_msg_type_name(type));
2746         }
2747 out:
2748         ceph_msg_put(msg);
2749 }
2750
2751 /*
2752  * lookup and return message for incoming reply.  set up reply message
2753  * pages.
2754  */
2755 static struct ceph_msg *get_reply(struct ceph_connection *con,
2756                                   struct ceph_msg_header *hdr,
2757                                   int *skip)
2758 {
2759         struct ceph_osd *osd = con->private;
2760         struct ceph_osd_client *osdc = osd->o_osdc;
2761         struct ceph_msg *m;
2762         struct ceph_osd_request *req;
2763         int front_len = le32_to_cpu(hdr->front_len);
2764         int data_len = le32_to_cpu(hdr->data_len);
2765         u64 tid;
2766
2767         tid = le64_to_cpu(hdr->tid);
2768         mutex_lock(&osdc->request_mutex);
2769         req = __lookup_request(osdc, tid);
2770         if (!req) {
2771                 *skip = 1;
2772                 m = NULL;
2773                 dout("get_reply unknown tid %llu from osd%d\n", tid,
2774                      osd->o_osd);
2775                 goto out;
2776         }
2777
2778         if (req->r_reply->con)
2779                 dout("%s revoking msg %p from old con %p\n", __func__,
2780                      req->r_reply, req->r_reply->con);
2781         ceph_msg_revoke_incoming(req->r_reply);
2782
2783         if (front_len > req->r_reply->front_alloc_len) {
2784                 pr_warn("get_reply front %d > preallocated %d (%u#%llu)\n",
2785                         front_len, req->r_reply->front_alloc_len,
2786                         (unsigned int)con->peer_name.type,
2787                         le64_to_cpu(con->peer_name.num));
2788                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
2789                                  false);
2790                 if (!m)
2791                         goto out;
2792                 ceph_msg_put(req->r_reply);
2793                 req->r_reply = m;
2794         }
2795         m = ceph_msg_get(req->r_reply);
2796
2797         if (data_len > 0) {
2798                 struct ceph_osd_data *osd_data;
2799
2800                 /*
2801                  * XXX This is assuming there is only one op containing
2802                  * XXX page data.  Probably OK for reads, but this
2803                  * XXX ought to be done more generally.
2804                  */
2805                 osd_data = osd_req_op_extent_osd_data(req, 0);
2806                 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
2807                         if (osd_data->pages &&
2808                                 unlikely(osd_data->length < data_len)) {
2809
2810                                 pr_warn("tid %lld reply has %d bytes, but only %llu bytes were ready\n",
2811                                         tid, data_len, osd_data->length);
2812                                 *skip = 1;
2813                                 ceph_msg_put(m);
2814                                 m = NULL;
2815                                 goto out;
2816                         }
2817                 }
2818         }
2819         *skip = 0;
2820         dout("get_reply tid %lld %p\n", tid, m);
2821
2822 out:
2823         mutex_unlock(&osdc->request_mutex);
2824         return m;
2825
2826 }
2827
2828 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
2829                                   struct ceph_msg_header *hdr,
2830                                   int *skip)
2831 {
2832         struct ceph_osd *osd = con->private;
2833         int type = le16_to_cpu(hdr->type);
2834         int front = le32_to_cpu(hdr->front_len);
2835
2836         *skip = 0;
2837         switch (type) {
2838         case CEPH_MSG_OSD_MAP:
2839         case CEPH_MSG_WATCH_NOTIFY:
2840                 return ceph_msg_new(type, front, GFP_NOFS, false);
2841         case CEPH_MSG_OSD_OPREPLY:
2842                 return get_reply(con, hdr, skip);
2843         default:
2844                 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
2845                         osd->o_osd);
2846                 *skip = 1;
2847                 return NULL;
2848         }
2849 }
2850
2851 /*
2852  * Wrappers to refcount containing ceph_osd struct
2853  */
2854 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
2855 {
2856         struct ceph_osd *osd = con->private;
2857         if (get_osd(osd))
2858                 return con;
2859         return NULL;
2860 }
2861
2862 static void put_osd_con(struct ceph_connection *con)
2863 {
2864         struct ceph_osd *osd = con->private;
2865         put_osd(osd);
2866 }
2867
2868 /*
2869  * authentication
2870  */
2871 /*
2872  * Note: returned pointer is the address of a structure that's
2873  * managed separately.  Caller must *not* attempt to free it.
2874  */
2875 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
2876                                         int *proto, int force_new)
2877 {
2878         struct ceph_osd *o = con->private;
2879         struct ceph_osd_client *osdc = o->o_osdc;
2880         struct ceph_auth_client *ac = osdc->client->monc.auth;
2881         struct ceph_auth_handshake *auth = &o->o_auth;
2882
2883         if (force_new && auth->authorizer) {
2884                 ceph_auth_destroy_authorizer(ac, auth->authorizer);
2885                 auth->authorizer = NULL;
2886         }
2887         if (!auth->authorizer) {
2888                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
2889                                                       auth);
2890                 if (ret)
2891                         return ERR_PTR(ret);
2892         } else {
2893                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
2894                                                      auth);
2895                 if (ret)
2896                         return ERR_PTR(ret);
2897         }
2898         *proto = ac->protocol;
2899
2900         return auth;
2901 }
2902
2903
2904 static int verify_authorizer_reply(struct ceph_connection *con, int len)
2905 {
2906         struct ceph_osd *o = con->private;
2907         struct ceph_osd_client *osdc = o->o_osdc;
2908         struct ceph_auth_client *ac = osdc->client->monc.auth;
2909
2910         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
2911 }
2912
2913 static int invalidate_authorizer(struct ceph_connection *con)
2914 {
2915         struct ceph_osd *o = con->private;
2916         struct ceph_osd_client *osdc = o->o_osdc;
2917         struct ceph_auth_client *ac = osdc->client->monc.auth;
2918
2919         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
2920         return ceph_monc_validate_auth(&osdc->client->monc);
2921 }
2922
2923 static const struct ceph_connection_operations osd_con_ops = {
2924         .get = get_osd_con,
2925         .put = put_osd_con,
2926         .dispatch = dispatch,
2927         .get_authorizer = get_authorizer,
2928         .verify_authorizer_reply = verify_authorizer_reply,
2929         .invalidate_authorizer = invalidate_authorizer,
2930         .alloc_msg = alloc_msg,
2931         .fault = osd_reset,
2932 };