1#include "ceph_debug.h"
2
3#include <linux/fs.h>
4#include <linux/kernel.h>
5#include <linux/sched.h>
 6#include <linux/slab.h>
7#include <linux/vmalloc.h>
8#include <linux/wait.h>
 9#include <linux/writeback.h>
10
11#include "super.h"
12#include "decode.h"
13#include "messenger.h"
14
15/*
16 * Capability management
17 *
18 * The Ceph metadata servers control client access to inode metadata
19 * and file data by issuing capabilities, granting clients permission
20 * to read and/or write both inode fields and file data to OSDs
21 * (storage nodes). Each capability consists of a set of bits
22 * indicating which operations are allowed.
23 *
24 * If the client holds a *_SHARED cap, the client has a coherent value
25 * that can be safely read from the cached inode.
26 *
27 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
28 * client is allowed to change inode attributes (e.g., file size,
29 * mtime), note its dirty state in the ceph_cap, and asynchronously
30 * flush that metadata change to the MDS.
31 *
32 * In the event of a conflicting operation (perhaps by another
33 * client), the MDS will revoke the conflicting client capabilities.
34 *
35 * In order for a client to cache an inode, it must hold a capability
36 * with at least one MDS server. When inodes are released, release
37 * notifications are batched and periodically sent en masse to the MDS
38 * cluster to release server state.
39 */
40
41
42/*
43 * Generate readable cap strings for debugging output.
44 */
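/*
 * ceph_cap_string() formats into one of MAX_CAP_STR statically allocated
 * buffers, rotating through them under cap_str_lock so that a few results
 * can be in flight at once (e.g. two cap sets printed by a single dout()).
 * A returned string is only valid until MAX_CAP_STR further calls are made.
 */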
45#define MAX_CAP_STR 20
46static char cap_str[MAX_CAP_STR][40];
47static DEFINE_SPINLOCK(cap_str_lock);
48static int last_cap_str;
49
50static char *gcap_string(char *s, int c)
51{
52 if (c & CEPH_CAP_GSHARED)
53 *s++ = 's';
54 if (c & CEPH_CAP_GEXCL)
55 *s++ = 'x';
56 if (c & CEPH_CAP_GCACHE)
57 *s++ = 'c';
58 if (c & CEPH_CAP_GRD)
59 *s++ = 'r';
60 if (c & CEPH_CAP_GWR)
61 *s++ = 'w';
62 if (c & CEPH_CAP_GBUFFER)
63 *s++ = 'b';
64 if (c & CEPH_CAP_GLAZYIO)
65 *s++ = 'l';
66 return s;
67}
68
69const char *ceph_cap_string(int caps)
70{
71 int i;
72 char *s;
73 int c;
74
75 spin_lock(&cap_str_lock);
76 i = last_cap_str++;
77 if (last_cap_str == MAX_CAP_STR)
78 last_cap_str = 0;
79 spin_unlock(&cap_str_lock);
80
81 s = cap_str[i];
82
83 if (caps & CEPH_CAP_PIN)
84 *s++ = 'p';
85
86 c = (caps >> CEPH_CAP_SAUTH) & 3;
87 if (c) {
88 *s++ = 'A';
89 s = gcap_string(s, c);
90 }
91
92 c = (caps >> CEPH_CAP_SLINK) & 3;
93 if (c) {
94 *s++ = 'L';
95 s = gcap_string(s, c);
96 }
97
98 c = (caps >> CEPH_CAP_SXATTR) & 3;
99 if (c) {
100 *s++ = 'X';
101 s = gcap_string(s, c);
102 }
103
104 c = caps >> CEPH_CAP_SFILE;
105 if (c) {
106 *s++ = 'F';
107 s = gcap_string(s, c);
108 }
109
110 if (s == cap_str[i])
111 *s++ = '-';
112 *s = 0;
113 return cap_str[i];
114}
115
116void ceph_caps_init(struct ceph_mds_client *mdsc)
117{
118 INIT_LIST_HEAD(&mdsc->caps_list);
119 spin_lock_init(&mdsc->caps_list_lock);
120}
121
122void ceph_caps_finalize(struct ceph_mds_client *mdsc)
123{
124 struct ceph_cap *cap;
125
126 spin_lock(&mdsc->caps_list_lock);
127 while (!list_empty(&mdsc->caps_list)) {
128 cap = list_first_entry(&mdsc->caps_list,
129 struct ceph_cap, caps_item);
130 list_del(&cap->caps_item);
131 kmem_cache_free(ceph_cap_cachep, cap);
132 }
133 mdsc->caps_total_count = 0;
134 mdsc->caps_avail_count = 0;
135 mdsc->caps_use_count = 0;
136 mdsc->caps_reserve_count = 0;
137 mdsc->caps_min_count = 0;
138 spin_unlock(&mdsc->caps_list_lock);
139}
140
141void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
142{
143 spin_lock(&mdsc->caps_list_lock);
144 mdsc->caps_min_count += delta;
145 BUG_ON(mdsc->caps_min_count < 0);
146 spin_unlock(&mdsc->caps_list_lock);
147}
148
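/*
 * Pre-allocate enough struct ceph_cap entries to satisfy @need and charge
 * them to @ctx.  The pool counters always preserve the invariant
 *   caps_total_count == caps_use_count + caps_reserve_count + caps_avail_count
 * which the BUG_ONs below re-check after every adjustment.
 */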
149int ceph_reserve_caps(struct ceph_mds_client *mdsc,
150 struct ceph_cap_reservation *ctx, int need)
151{
152 int i;
153 struct ceph_cap *cap;
154 int have;
155 int alloc = 0;
156 LIST_HEAD(newcaps);
157 int ret = 0;
158
159 dout("reserve caps ctx=%p need=%d\n", ctx, need);
160
161 /* first reserve any caps that are already allocated */
162 spin_lock(&mdsc->caps_list_lock);
163 if (mdsc->caps_avail_count >= need)
164 have = need;
165 else
166 have = mdsc->caps_avail_count;
167 mdsc->caps_avail_count -= have;
168 mdsc->caps_reserve_count += have;
169 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
170 mdsc->caps_reserve_count +
171 mdsc->caps_avail_count);
172 spin_unlock(&mdsc->caps_list_lock);
173
174 for (i = have; i < need; i++) {
175 cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
176 if (!cap) {
177 ret = -ENOMEM;
178 goto out_alloc_count;
179 }
180 list_add(&cap->caps_item, &newcaps);
181 alloc++;
182 }
183 BUG_ON(have + alloc != need);
184
185 spin_lock(&mdsc->caps_list_lock);
186 mdsc->caps_total_count += alloc;
187 mdsc->caps_reserve_count += alloc;
188 list_splice(&newcaps, &mdsc->caps_list);
189
190 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
191 mdsc->caps_reserve_count +
192 mdsc->caps_avail_count);
193 spin_unlock(&mdsc->caps_list_lock);
194
195 ctx->count = need;
196 dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
197 ctx, mdsc->caps_total_count, mdsc->caps_use_count,
198 mdsc->caps_reserve_count, mdsc->caps_avail_count);
199 return 0;
200
201out_alloc_count:
202 /* we didn't manage to reserve as much as we needed */
203 pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
204 ctx, need, have);
205 return ret;
206}
207
208int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
209 struct ceph_cap_reservation *ctx)
210{
211 dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
212 if (ctx->count) {
213 spin_lock(&mdsc->caps_list_lock);
214 BUG_ON(mdsc->caps_reserve_count < ctx->count);
215 mdsc->caps_reserve_count -= ctx->count;
216 mdsc->caps_avail_count += ctx->count;
217 ctx->count = 0;
218 dout("unreserve caps %d = %d used + %d resv + %d avail\n",
219 mdsc->caps_total_count, mdsc->caps_use_count,
220 mdsc->caps_reserve_count, mdsc->caps_avail_count);
221 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
222 mdsc->caps_reserve_count +
223 mdsc->caps_avail_count);
224 spin_unlock(&mdsc->caps_list_lock);
225 }
226 return 0;
227}
228
229static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
230 struct ceph_cap_reservation *ctx)
231{
232 struct ceph_cap *cap = NULL;
233
234 /* temporary, until we do something about cap import/export */
235 if (!ctx) {
236 cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
237 if (cap) {
238 mdsc->caps_use_count++;
239 mdsc->caps_total_count++;
240 }
241 return cap;
242 }
243
244 spin_lock(&mdsc->caps_list_lock);
245 dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
246 ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
247 mdsc->caps_reserve_count, mdsc->caps_avail_count);
248 BUG_ON(!ctx->count);
249 BUG_ON(ctx->count > mdsc->caps_reserve_count);
250 BUG_ON(list_empty(&mdsc->caps_list));
251
252 ctx->count--;
253 mdsc->caps_reserve_count--;
254 mdsc->caps_use_count++;
255
256 cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
257 list_del(&cap->caps_item);
258
259 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
260 mdsc->caps_reserve_count + mdsc->caps_avail_count);
261 spin_unlock(&mdsc->caps_list_lock);
262 return cap;
263}
264
265void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
266{
267 spin_lock(&mdsc->caps_list_lock);
268 dout("put_cap %p %d = %d used + %d resv + %d avail\n",
269 cap, mdsc->caps_total_count, mdsc->caps_use_count,
270 mdsc->caps_reserve_count, mdsc->caps_avail_count);
271 mdsc->caps_use_count--;
272 /*
273 * Keep some preallocated caps around (ceph_min_count), to
274 * avoid lots of free/alloc churn.
275 */
276 if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
277 mdsc->caps_min_count) {
278 mdsc->caps_total_count--;
279 kmem_cache_free(ceph_cap_cachep, cap);
280 } else {
281 mdsc->caps_avail_count++;
282 list_add(&cap->caps_item, &mdsc->caps_list);
283 }
284
285 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
286 mdsc->caps_reserve_count + mdsc->caps_avail_count);
287 spin_unlock(&mdsc->caps_list_lock);
288}
289
290void ceph_reservation_status(struct ceph_client *client,
291 int *total, int *avail, int *used, int *reserved,
292 int *min)
293{
294 struct ceph_mds_client *mdsc = &client->mdsc;
295
296 if (total)
297 *total = mdsc->caps_total_count;
298 if (avail)
299 *avail = mdsc->caps_avail_count;
300 if (used)
301 *used = mdsc->caps_use_count;
302 if (reserved)
303 *reserved = mdsc->caps_reserve_count;
304 if (min)
305 *min = mdsc->caps_min_count;
306}
307
308/*
309 * Find ceph_cap for given mds, if any.
310 *
311 * Called with i_lock held.
312 */
313static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
314{
315 struct ceph_cap *cap;
316 struct rb_node *n = ci->i_caps.rb_node;
317
318 while (n) {
319 cap = rb_entry(n, struct ceph_cap, ci_node);
320 if (mds < cap->mds)
321 n = n->rb_left;
322 else if (mds > cap->mds)
323 n = n->rb_right;
324 else
325 return cap;
326 }
327 return NULL;
328}
329
330struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
331{
332 struct ceph_cap *cap;
333
334 spin_lock(&ci->vfs_inode.i_lock);
335 cap = __get_cap_for_mds(ci, mds);
336 spin_unlock(&ci->vfs_inode.i_lock);
337 return cap;
338}
339
340/*
341 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
342 */
343static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
344{
345 struct ceph_cap *cap;
346 int mds = -1;
347 struct rb_node *p;
348
349 /* prefer mds with WR|BUFFER|EXCL caps */
350 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
351 cap = rb_entry(p, struct ceph_cap, ci_node);
352 mds = cap->mds;
353 if (cap->issued & (CEPH_CAP_FILE_WR |
354 CEPH_CAP_FILE_BUFFER |
355 CEPH_CAP_FILE_EXCL))
356 break;
357 }
358 return mds;
359}
360
361int ceph_get_cap_mds(struct inode *inode)
362{
363 int mds;
364 spin_lock(&inode->i_lock);
365 mds = __ceph_get_cap_mds(ceph_inode(inode));
366 spin_unlock(&inode->i_lock);
367 return mds;
368}
369
370/*
371 * Called under i_lock.
372 */
373static void __insert_cap_node(struct ceph_inode_info *ci,
374 struct ceph_cap *new)
375{
376 struct rb_node **p = &ci->i_caps.rb_node;
377 struct rb_node *parent = NULL;
378 struct ceph_cap *cap = NULL;
379
380 while (*p) {
381 parent = *p;
382 cap = rb_entry(parent, struct ceph_cap, ci_node);
383 if (new->mds < cap->mds)
384 p = &(*p)->rb_left;
385 else if (new->mds > cap->mds)
386 p = &(*p)->rb_right;
387 else
388 BUG();
389 }
390
391 rb_link_node(&new->ci_node, parent, p);
392 rb_insert_color(&new->ci_node, &ci->i_caps);
393}
394
395/*
396 * (re)set cap hold timeouts, which control the delayed release
397 * of unused caps back to the MDS. Should be called on cap use.
398 */
399static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
400 struct ceph_inode_info *ci)
401{
402 struct ceph_mount_args *ma = mdsc->client->mount_args;
403
404 ci->i_hold_caps_min = round_jiffies(jiffies +
405 ma->caps_wanted_delay_min * HZ);
406 ci->i_hold_caps_max = round_jiffies(jiffies +
407 ma->caps_wanted_delay_max * HZ);
408 dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
409 ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
410}
411
412/*
413 * (Re)queue cap at the end of the delayed cap release list.
414 *
415 * If I_FLUSH is set, leave the inode at the front of the list.
416 *
417 * Caller holds i_lock
418 * -> we take mdsc->cap_delay_lock
419 */
420static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
421 struct ceph_inode_info *ci)
422{
423 __cap_set_timeouts(mdsc, ci);
424 dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
425 ci->i_ceph_flags, ci->i_hold_caps_max);
426 if (!mdsc->stopping) {
427 spin_lock(&mdsc->cap_delay_lock);
428 if (!list_empty(&ci->i_cap_delay_list)) {
429 if (ci->i_ceph_flags & CEPH_I_FLUSH)
430 goto no_change;
431 list_del_init(&ci->i_cap_delay_list);
432 }
433 list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
434no_change:
435 spin_unlock(&mdsc->cap_delay_lock);
436 }
437}
438
439/*
440 * Queue an inode for immediate writeback. Mark inode with I_FLUSH,
441 * indicating we should send a cap message to flush dirty metadata
442 * asap, and move to the front of the delayed cap list.
443 */
444static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
445 struct ceph_inode_info *ci)
446{
447 dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
448 spin_lock(&mdsc->cap_delay_lock);
449 ci->i_ceph_flags |= CEPH_I_FLUSH;
450 if (!list_empty(&ci->i_cap_delay_list))
451 list_del_init(&ci->i_cap_delay_list);
452 list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
453 spin_unlock(&mdsc->cap_delay_lock);
454}
455
456/*
457 * Cancel delayed work on cap.
458 *
459 * Caller must hold i_lock.
460 */
461static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
462 struct ceph_inode_info *ci)
463{
464 dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
465 if (list_empty(&ci->i_cap_delay_list))
466 return;
467 spin_lock(&mdsc->cap_delay_lock);
468 list_del_init(&ci->i_cap_delay_list);
469 spin_unlock(&mdsc->cap_delay_lock);
470}
471
472/*
473 * Common issue checks for add_cap, handle_cap_grant.
474 */
475static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
476 unsigned issued)
477{
478 unsigned had = __ceph_caps_issued(ci, NULL);
479
480 /*
481 * Each time we receive FILE_CACHE anew, we increment
482 * i_rdcache_gen.
483 */
484 if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
485 (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
486 ci->i_rdcache_gen++;
487
488 /*
489 * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
490 * don't know what happened to this directory while we didn't
491 * have the cap.
492 */
493 if ((issued & CEPH_CAP_FILE_SHARED) &&
494 (had & CEPH_CAP_FILE_SHARED) == 0) {
495 ci->i_shared_gen++;
496 if (S_ISDIR(ci->vfs_inode.i_mode)) {
497 dout(" marking %p NOT complete\n", &ci->vfs_inode);
498 ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
499 }
500 }
501}
502
503/*
504 * Add a capability under the given MDS session.
505 *
506 * Caller should hold session snap_rwsem (read) and s_mutex.
507 *
508 * @fmode is the open file mode, if we are opening a file, otherwise
509 * it is < 0. (This is so we can atomically add the cap and add an
510 * open file reference to it.)
511 */
512int ceph_add_cap(struct inode *inode,
513 struct ceph_mds_session *session, u64 cap_id,
514 int fmode, unsigned issued, unsigned wanted,
515 unsigned seq, unsigned mseq, u64 realmino, int flags,
516 struct ceph_cap_reservation *caps_reservation)
517{
518 struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
519 struct ceph_inode_info *ci = ceph_inode(inode);
520 struct ceph_cap *new_cap = NULL;
521 struct ceph_cap *cap;
522 int mds = session->s_mds;
523 int actual_wanted;
524
525 dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
526 session->s_mds, cap_id, ceph_cap_string(issued), seq);
527
528 /*
529 * If we are opening the file, include file mode wanted bits
530 * in wanted.
531 */
532 if (fmode >= 0)
533 wanted |= ceph_caps_for_mode(fmode);
534
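/*
 * A new struct ceph_cap may need to be allocated; do that outside i_lock
 * (get_cap() can block when it has to allocate from the slab cache rather
 * than draw from a reservation), then retry the lookup with the lock held.
 */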
535retry:
536 spin_lock(&inode->i_lock);
537 cap = __get_cap_for_mds(ci, mds);
538 if (!cap) {
539 if (new_cap) {
540 cap = new_cap;
541 new_cap = NULL;
542 } else {
543 spin_unlock(&inode->i_lock);
544 new_cap = get_cap(mdsc, caps_reservation);
545 if (new_cap == NULL)
546 return -ENOMEM;
547 goto retry;
548 }
549
550 cap->issued = 0;
551 cap->implemented = 0;
552 cap->mds = mds;
553 cap->mds_wanted = 0;
554
555 cap->ci = ci;
556 __insert_cap_node(ci, cap);
557
558 /* clear out old exporting info? (i.e. on cap import) */
559 if (ci->i_cap_exporting_mds == mds) {
560 ci->i_cap_exporting_issued = 0;
561 ci->i_cap_exporting_mseq = 0;
562 ci->i_cap_exporting_mds = -1;
563 }
564
565 /* add to session cap list */
566 cap->session = session;
567 spin_lock(&session->s_cap_lock);
568 list_add_tail(&cap->session_caps, &session->s_caps);
569 session->s_nr_caps++;
570 spin_unlock(&session->s_cap_lock);
571 }
572
573 if (!ci->i_snap_realm) {
574 /*
575 * add this inode to the appropriate snap realm
576 */
577 struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
578 realmino);
579 if (realm) {
580 ceph_get_snap_realm(mdsc, realm);
581 spin_lock(&realm->inodes_with_caps_lock);
582 ci->i_snap_realm = realm;
583 list_add(&ci->i_snap_realm_item,
584 &realm->inodes_with_caps);
585 spin_unlock(&realm->inodes_with_caps_lock);
586 } else {
587 pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
588 realmino);
589 WARN_ON(!realm);
590 }
591 }
592
593 __check_cap_issue(ci, cap, issued);
594
595 /*
596 * If we are issued caps we don't want, or the mds' wanted
597 * value appears to be off, queue a check so we'll release
598 * later and/or update the mds wanted value.
599 */
600 actual_wanted = __ceph_caps_wanted(ci);
601 if ((wanted & ~actual_wanted) ||
602 (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
603 dout(" issued %s, mds wanted %s, actual %s, queueing\n",
604 ceph_cap_string(issued), ceph_cap_string(wanted),
605 ceph_cap_string(actual_wanted));
606 __cap_delay_requeue(mdsc, ci);
607 }
608
609 if (flags & CEPH_CAP_FLAG_AUTH)
610 ci->i_auth_cap = cap;
611 else if (ci->i_auth_cap == cap)
612 ci->i_auth_cap = NULL;
613
614 dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
615 inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
616 ceph_cap_string(issued|cap->issued), seq, mds);
617 cap->cap_id = cap_id;
618 cap->issued = issued;
619 cap->implemented |= issued;
620 cap->mds_wanted |= wanted;
621 cap->seq = seq;
622 cap->issue_seq = seq;
623 cap->mseq = mseq;
624 cap->cap_gen = session->s_cap_gen;
625
626 if (fmode >= 0)
627 __ceph_get_fmode(ci, fmode);
628 spin_unlock(&inode->i_lock);
629 wake_up_all(&ci->i_cap_wq);
630 return 0;
631}
632
633/*
634 * Return true if cap has not timed out and belongs to the current
635 * generation of the MDS session (i.e. has not gone 'stale' due to
636 * us losing touch with the mds).
637 */
638static int __cap_is_valid(struct ceph_cap *cap)
639{
640 unsigned long ttl;
641 u32 gen;
642
643 spin_lock(&cap->session->s_cap_lock);
644 gen = cap->session->s_cap_gen;
645 ttl = cap->session->s_cap_ttl;
646 spin_unlock(&cap->session->s_cap_lock);
647
648 if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
649 dout("__cap_is_valid %p cap %p issued %s "
650 "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
651 cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
652 return 0;
653 }
654
655 return 1;
656}
657
658/*
659 * Return set of valid cap bits issued to us. Note that caps time
660 * out, and may be invalidated in bulk if the client session times out
661 * and session->s_cap_gen is bumped.
662 */
663int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
664{
665 int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
666 struct ceph_cap *cap;
667 struct rb_node *p;
668
669 if (implemented)
670 *implemented = 0;
671 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
672 cap = rb_entry(p, struct ceph_cap, ci_node);
673 if (!__cap_is_valid(cap))
674 continue;
675 dout("__ceph_caps_issued %p cap %p issued %s\n",
676 &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
677 have |= cap->issued;
678 if (implemented)
679 *implemented |= cap->implemented;
680 }
681 return have;
682}
683
684/*
685 * Get cap bits issued by caps other than @ocap
686 */
687int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
688{
689 int have = ci->i_snap_caps;
690 struct ceph_cap *cap;
691 struct rb_node *p;
692
693 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
694 cap = rb_entry(p, struct ceph_cap, ci_node);
695 if (cap == ocap)
696 continue;
697 if (!__cap_is_valid(cap))
698 continue;
699 have |= cap->issued;
700 }
701 return have;
702}
703
704/*
705 * Move a cap to the end of the LRU (oldest caps at list head, newest
706 * at list tail).
707 */
708static void __touch_cap(struct ceph_cap *cap)
709{
710 struct ceph_mds_session *s = cap->session;
711
712 spin_lock(&s->s_cap_lock);
713 if (s->s_cap_iterator == NULL) {
714 dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
715 s->s_mds);
716 list_move_tail(&cap->session_caps, &s->s_caps);
717 } else {
718 dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
719 &cap->ci->vfs_inode, cap, s->s_mds);
720 }
721 spin_unlock(&s->s_cap_lock);
722}
723
724/*
725 * Check if we hold the given mask. If so, move the cap(s) to the
726 * front of their respective LRUs. (This is the preferred way for
727 * callers to check for caps they want.)
728 */
729int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
730{
731 struct ceph_cap *cap;
732 struct rb_node *p;
733 int have = ci->i_snap_caps;
734
735 if ((have & mask) == mask) {
736 dout("__ceph_caps_issued_mask %p snap issued %s"
737 " (mask %s)\n", &ci->vfs_inode,
738 ceph_cap_string(have),
739 ceph_cap_string(mask));
740 return 1;
741 }
742
743 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
744 cap = rb_entry(p, struct ceph_cap, ci_node);
745 if (!__cap_is_valid(cap))
746 continue;
747 if ((cap->issued & mask) == mask) {
748 dout("__ceph_caps_issued_mask %p cap %p issued %s"
749 " (mask %s)\n", &ci->vfs_inode, cap,
750 ceph_cap_string(cap->issued),
751 ceph_cap_string(mask));
752 if (touch)
753 __touch_cap(cap);
754 return 1;
755 }
756
757 /* does a combination of caps satisfy mask? */
758 have |= cap->issued;
759 if ((have & mask) == mask) {
760 dout("__ceph_caps_issued_mask %p combo issued %s"
761 " (mask %s)\n", &ci->vfs_inode,
762 ceph_cap_string(cap->issued),
763 ceph_cap_string(mask));
764 if (touch) {
765 struct rb_node *q;
766
767 /* touch this + preceding caps */
768 __touch_cap(cap);
769 for (q = rb_first(&ci->i_caps); q != p;
770 q = rb_next(q)) {
771 cap = rb_entry(q, struct ceph_cap,
772 ci_node);
773 if (!__cap_is_valid(cap))
774 continue;
775 __touch_cap(cap);
776 }
777 }
778 return 1;
779 }
780 }
781
782 return 0;
783}
784
785/*
786 * Return true if mask caps are currently being revoked by an MDS.
787 */
788int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
789{
790 struct inode *inode = &ci->vfs_inode;
791 struct ceph_cap *cap;
792 struct rb_node *p;
793 int ret = 0;
794
795 spin_lock(&inode->i_lock);
796 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
797 cap = rb_entry(p, struct ceph_cap, ci_node);
798 if (__cap_is_valid(cap) &&
799 (cap->implemented & ~cap->issued & mask)) {
800 ret = 1;
801 break;
802 }
803 }
804 spin_unlock(&inode->i_lock);
805 dout("ceph_caps_revoking %p %s = %d\n", inode,
806 ceph_cap_string(mask), ret);
807 return ret;
808}
809
810int __ceph_caps_used(struct ceph_inode_info *ci)
811{
812 int used = 0;
813 if (ci->i_pin_ref)
814 used |= CEPH_CAP_PIN;
815 if (ci->i_rd_ref)
816 used |= CEPH_CAP_FILE_RD;
817 if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
818 used |= CEPH_CAP_FILE_CACHE;
819 if (ci->i_wr_ref)
820 used |= CEPH_CAP_FILE_WR;
821 if (ci->i_wrbuffer_ref)
822 used |= CEPH_CAP_FILE_BUFFER;
823 return used;
824}
825
826/*
827 * wanted, by virtue of open file modes
828 */
829int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
830{
831 int want = 0;
832 int mode;
833 for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
834 if (ci->i_nr_by_mode[mode])
835 want |= ceph_caps_for_mode(mode);
836 return want;
837}
838
839/*
840 * Return caps we have registered with the MDS(s) as 'wanted'.
841 */
842int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
843{
844 struct ceph_cap *cap;
845 struct rb_node *p;
846 int mds_wanted = 0;
847
848 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
849 cap = rb_entry(p, struct ceph_cap, ci_node);
850 if (!__cap_is_valid(cap))
851 continue;
852 mds_wanted |= cap->mds_wanted;
853 }
854 return mds_wanted;
855}
856
857/*
858 * called under i_lock
859 */
860static int __ceph_is_any_caps(struct ceph_inode_info *ci)
861{
862 return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
863}
864
865/*
866 * Remove a cap. Take steps to deal with a racing iterate_session_caps.
867 *
868 * caller should hold i_lock.
869 * caller will not hold session s_mutex if called from destroy_inode.
870 */
871void __ceph_remove_cap(struct ceph_cap *cap)
872{
873 struct ceph_mds_session *session = cap->session;
874 struct ceph_inode_info *ci = cap->ci;
875 struct ceph_mds_client *mdsc =
876 &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
877 int removed = 0;
878
879 dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
880
881 /* remove from session list */
882 spin_lock(&session->s_cap_lock);
883 if (session->s_cap_iterator == cap) {
884 /* not yet, we are iterating over this very cap */
885 dout("__ceph_remove_cap delaying %p removal from session %p\n",
886 cap, cap->session);
887 } else {
888 list_del_init(&cap->session_caps);
889 session->s_nr_caps--;
890 cap->session = NULL;
891 removed = 1;
892 }
893 /* protect backpointer with s_cap_lock: see iterate_session_caps */
894 cap->ci = NULL;
895 spin_unlock(&session->s_cap_lock);
896
897 /* remove from inode list */
898 rb_erase(&cap->ci_node, &ci->i_caps);
899 if (ci->i_auth_cap == cap)
900 ci->i_auth_cap = NULL;
901
902 if (removed)
903 ceph_put_cap(mdsc, cap);
904
905 if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
906 struct ceph_snap_realm *realm = ci->i_snap_realm;
907 spin_lock(&realm->inodes_with_caps_lock);
908 list_del_init(&ci->i_snap_realm_item);
909 ci->i_snap_realm_counter++;
910 ci->i_snap_realm = NULL;
911 spin_unlock(&realm->inodes_with_caps_lock);
912 ceph_put_snap_realm(mdsc, realm);
913 }
914 if (!__ceph_is_any_real_caps(ci))
915 __cap_delay_cancel(mdsc, ci);
916}
917
918/*
919 * Build and send a cap message to the given MDS.
920 *
921 * Caller should be holding s_mutex.
922 */
923static int send_cap_msg(struct ceph_mds_session *session,
924 u64 ino, u64 cid, int op,
925 int caps, int wanted, int dirty,
926 u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
927 u64 size, u64 max_size,
928 struct timespec *mtime, struct timespec *atime,
929 u64 time_warp_seq,
930 uid_t uid, gid_t gid, mode_t mode,
931 u64 xattr_version,
932 struct ceph_buffer *xattrs_buf,
933 u64 follows)
934{
935 struct ceph_mds_caps *fc;
936 struct ceph_msg *msg;
937
938 dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
939 " seq %u/%u mseq %u follows %lld size %llu/%llu"
940 " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
941 cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
942 ceph_cap_string(dirty),
943 seq, issue_seq, mseq, follows, size, max_size,
944 xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
945
946 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS);
947 if (!msg)
948 return -ENOMEM;
949
950 msg->hdr.tid = cpu_to_le64(flush_tid);
951
952 fc = msg->front.iov_base;
953 memset(fc, 0, sizeof(*fc));
954
955 fc->cap_id = cpu_to_le64(cid);
956 fc->op = cpu_to_le32(op);
957 fc->seq = cpu_to_le32(seq);
958 fc->issue_seq = cpu_to_le32(issue_seq);
959 fc->migrate_seq = cpu_to_le32(mseq);
960 fc->caps = cpu_to_le32(caps);
961 fc->wanted = cpu_to_le32(wanted);
962 fc->dirty = cpu_to_le32(dirty);
963 fc->ino = cpu_to_le64(ino);
964 fc->snap_follows = cpu_to_le64(follows);
965
966 fc->size = cpu_to_le64(size);
967 fc->max_size = cpu_to_le64(max_size);
968 if (mtime)
969 ceph_encode_timespec(&fc->mtime, mtime);
970 if (atime)
971 ceph_encode_timespec(&fc->atime, atime);
972 fc->time_warp_seq = cpu_to_le32(time_warp_seq);
973
974 fc->uid = cpu_to_le32(uid);
975 fc->gid = cpu_to_le32(gid);
976 fc->mode = cpu_to_le32(mode);
977
978 fc->xattr_version = cpu_to_le64(xattr_version);
979 if (xattrs_buf) {
980 msg->middle = ceph_buffer_get(xattrs_buf);
981 fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
982 msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
983 }
984
985 ceph_con_send(&session->s_con, msg);
986 return 0;
987}
988
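/*
 * Append a release record for (ino, cap_id) to the session's current
 * preallocated release message.  Once the message holds
 * CEPH_CAPS_PER_RELEASE entries it is moved to s_cap_releases_done,
 * ready to be sent to the MDS.
 */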
989static void __queue_cap_release(struct ceph_mds_session *session,
990 u64 ino, u64 cap_id, u32 migrate_seq,
991 u32 issue_seq)
992{
993 struct ceph_msg *msg;
994 struct ceph_mds_cap_release *head;
995 struct ceph_mds_cap_item *item;
996
997 spin_lock(&session->s_cap_lock);
998 BUG_ON(!session->s_num_cap_releases);
999 msg = list_first_entry(&session->s_cap_releases,
1000 struct ceph_msg, list_head);
1001
1002 dout(" adding %llx release to mds%d msg %p (%d left)\n",
1003 ino, session->s_mds, msg, session->s_num_cap_releases);
1004
1005 BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
1006 head = msg->front.iov_base;
1007 head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
1008 item = msg->front.iov_base + msg->front.iov_len;
1009 item->ino = cpu_to_le64(ino);
1010 item->cap_id = cpu_to_le64(cap_id);
1011 item->migrate_seq = cpu_to_le32(migrate_seq);
1012 item->seq = cpu_to_le32(issue_seq);
1013
1014 session->s_num_cap_releases--;
1015
1016 msg->front.iov_len += sizeof(*item);
1017 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1018 dout(" release msg %p full\n", msg);
1019 list_move_tail(&msg->list_head, &session->s_cap_releases_done);
1020 } else {
1021 dout(" release msg %p at %d/%d (%d)\n", msg,
1022 (int)le32_to_cpu(head->num),
1023 (int)CEPH_CAPS_PER_RELEASE,
1024 (int)msg->front.iov_len);
1025 }
1026 spin_unlock(&session->s_cap_lock);
1027}
1028
1029/*
1030 * Queue cap releases when an inode is dropped from our cache. Since
1031 * inode is about to be destroyed, there is no need for i_lock.
1032 */
1033void ceph_queue_caps_release(struct inode *inode)
1034{
1035 struct ceph_inode_info *ci = ceph_inode(inode);
1036 struct rb_node *p;
1037
1038 p = rb_first(&ci->i_caps);
1039 while (p) {
1040 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
1041 struct ceph_mds_session *session = cap->session;
1042
1043 __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
1044 cap->mseq, cap->issue_seq);
1045 p = rb_next(p);
1046 __ceph_remove_cap(cap);
1047 }
1048}
1049
1050/*
1051 * Send a cap msg on the given inode. Update our caps state, then
1052 * drop i_lock and send the message.
1053 *
1054 * Make note of max_size reported/requested from mds, revoked caps
1055 * that have now been implemented.
1056 *
1057 * Make a half-hearted attempt to invalidate the page cache if we are
1058 * dropping RDCACHE. Note that this will leave behind locked pages
1059 * that we'll then need to deal with elsewhere.
1060 *
1061 * Return non-zero if delayed release, or we experienced an error
1062 * such that the caller should requeue + retry later.
1063 *
1064 * called with i_lock, then drops it.
1065 * caller should hold snap_rwsem (read), s_mutex.
1066 */
1067static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1068 int op, int used, int want, int retain, int flushing,
1069 unsigned *pflush_tid)
1070 __releases(cap->ci->vfs_inode->i_lock)
1071{
1072 struct ceph_inode_info *ci = cap->ci;
1073 struct inode *inode = &ci->vfs_inode;
1074 u64 cap_id = cap->cap_id;
1075 int held, revoking, dropping, keep;
1076 u64 seq, issue_seq, mseq, time_warp_seq, follows;
1077 u64 size, max_size;
1078 struct timespec mtime, atime;
1079 int wake = 0;
1080 mode_t mode;
1081 uid_t uid;
1082 gid_t gid;
1083 struct ceph_mds_session *session;
1084 u64 xattr_version = 0;
1085 struct ceph_buffer *xattr_blob = NULL;
1086 int delayed = 0;
1087 u64 flush_tid = 0;
1088 int i;
1089 int ret;
1090
1091 held = cap->issued | cap->implemented;
1092 revoking = cap->implemented & ~cap->issued;
1093 retain &= ~revoking;
1094 dropping = cap->issued & ~retain;
1095
1096 dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
1097 inode, cap, cap->session,
1098 ceph_cap_string(held), ceph_cap_string(held & retain),
1099 ceph_cap_string(revoking));
1100 BUG_ON((retain & CEPH_CAP_PIN) == 0);
1101
1102 session = cap->session;
1103
1104 /* don't release wanted unless we've waited a bit. */
1105 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1106 time_before(jiffies, ci->i_hold_caps_min)) {
1107 dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
1108 ceph_cap_string(cap->issued),
1109 ceph_cap_string(cap->issued & retain),
1110 ceph_cap_string(cap->mds_wanted),
1111 ceph_cap_string(want));
1112 want |= cap->mds_wanted;
1113 retain |= cap->issued;
1114 delayed = 1;
1115 }
1116 ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
1117
1118 cap->issued &= retain; /* drop bits we don't want */
1119 if (cap->implemented & ~cap->issued) {
1120 /*
1121 * Wake up any waiters on wanted -> needed transition.
1122 * This is due to the weird transition from buffered
1123 * to sync IO... we need to flush dirty pages _before_
1124 * allowing sync writes to avoid reordering.
1125 */
1126 wake = 1;
1127 }
1128 cap->implemented &= cap->issued | used;
1129 cap->mds_wanted = want;
1130
1131 if (flushing) {
1132 /*
1133 * assign a tid for flush operations so we can avoid
1134 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
1135 * clean type races. track latest tid for every bit
1136 * so we can handle flush AxFw, flush Fw, and have the
1137 * first ack clean Ax.
1138 */
1139 flush_tid = ++ci->i_cap_flush_last_tid;
1140 if (pflush_tid)
1141 *pflush_tid = flush_tid;
1142 dout(" cap_flush_tid %d\n", (int)flush_tid);
1143 for (i = 0; i < CEPH_CAP_BITS; i++)
1144 if (flushing & (1 << i))
1145 ci->i_cap_flush_tid[i] = flush_tid;
1146
1147 follows = ci->i_head_snapc->seq;
1148 } else {
1149 follows = 0;
1150 }
1151
1152 keep = cap->implemented;
1153 seq = cap->seq;
1154 issue_seq = cap->issue_seq;
1155 mseq = cap->mseq;
1156 size = inode->i_size;
1157 ci->i_reported_size = size;
1158 max_size = ci->i_wanted_max_size;
1159 ci->i_requested_max_size = max_size;
1160 mtime = inode->i_mtime;
1161 atime = inode->i_atime;
1162 time_warp_seq = ci->i_time_warp_seq;
1163 uid = inode->i_uid;
1164 gid = inode->i_gid;
1165 mode = inode->i_mode;
1166
1167 if (flushing & CEPH_CAP_XATTR_EXCL) {
1168 __ceph_build_xattrs_blob(ci);
1169 xattr_blob = ci->i_xattrs.blob;
1170 xattr_version = ci->i_xattrs.version;
1171 }
1172
1173 spin_unlock(&inode->i_lock);
1174
1175 ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1176 op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
1177 size, max_size, &mtime, &atime, time_warp_seq,
1178 uid, gid, mode, xattr_version, xattr_blob,
1179 follows);
1180 if (ret < 0) {
1181 dout("error sending cap msg, must requeue %p\n", inode);
1182 delayed = 1;
1183 }
1184
1185 if (wake)
1186 wake_up_all(&ci->i_cap_wq);
1187
1188 return delayed;
1189}
1190
1191/*
1192 * When a snapshot is taken, clients accumulate dirty metadata on
1193 * inodes with capabilities in ceph_cap_snaps to describe the file
1194 * state at the time the snapshot was taken. This must be flushed
1195 * asynchronously back to the MDS once sync writes complete and dirty
1196 * data is written out.
1197 *
1198 * Unless @again is true, skip cap_snaps that were already sent to
1199 * the MDS (i.e., during this session).
1200 *
1201 * Called under i_lock. Takes s_mutex as needed.
1202 */
1203void __ceph_flush_snaps(struct ceph_inode_info *ci,
1204 struct ceph_mds_session **psession,
1205 int again)
1206 __releases(ci->vfs_inode->i_lock)
1207 __acquires(ci->vfs_inode->i_lock)
1208{
1209 struct inode *inode = &ci->vfs_inode;
1210 int mds;
1211 struct ceph_cap_snap *capsnap;
1212 u32 mseq;
1213 struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
1214 struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
1215 session->s_mutex */
1216 u64 next_follows = 0; /* keep track of how far we've gotten through the
1217 i_cap_snaps list, and skip these entries next time
1218 around to avoid an infinite loop */
1219
1220 if (psession)
1221 session = *psession;
1222
1223 dout("__flush_snaps %p\n", inode);
1224retry:
1225 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
1226 /* avoid an infinite loop after retry */
1227 if (capsnap->follows < next_follows)
1228 continue;
1229 /*
1230 * we need to wait for sync writes to complete and for dirty
1231 * pages to be written out.
1232 */
1233 if (capsnap->dirty_pages || capsnap->writing)
1234 break;
1235
1236 /*
1237 * if cap writeback already occurred, we should have dropped
1238 * the capsnap in ceph_put_wrbuffer_cap_refs.
1239 */
1240 BUG_ON(capsnap->dirty == 0);
1241
1242 /* pick mds, take s_mutex */
1243 if (ci->i_auth_cap == NULL) {
1244 dout("no auth cap (migrating?), doing nothing\n");
1245 goto out;
1246 }
1247
1248 /* only flush each capsnap once */
1249 if (!again && !list_empty(&capsnap->flushing_item)) {
1250 dout("already flushed %p, skipping\n", capsnap);
1251 continue;
1252 }
1253
1254 mds = ci->i_auth_cap->session->s_mds;
1255 mseq = ci->i_auth_cap->mseq;
1256
1257 if (session && session->s_mds != mds) {
1258 dout("oops, wrong session %p mutex\n", session);
1259 mutex_unlock(&session->s_mutex);
1260 ceph_put_mds_session(session);
1261 session = NULL;
1262 }
1263 if (!session) {
1264 spin_unlock(&inode->i_lock);
1265 mutex_lock(&mdsc->mutex);
1266 session = __ceph_lookup_mds_session(mdsc, mds);
1267 mutex_unlock(&mdsc->mutex);
1268 if (session) {
1269 dout("inverting session/ino locks on %p\n",
1270 session);
1271 mutex_lock(&session->s_mutex);
1272 }
1273 /*
1274 * if session == NULL, we raced against a cap
1275 * deletion or migration. retry, and we'll
1276 * get a better @mds value next time.
1277 */
1278 spin_lock(&inode->i_lock);
1279 goto retry;
1280 }
1281
1282 capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
1283 atomic_inc(&capsnap->nref);
1284 if (!list_empty(&capsnap->flushing_item))
1285 list_del_init(&capsnap->flushing_item);
1286 list_add_tail(&capsnap->flushing_item,
1287 &session->s_cap_snaps_flushing);
1288 spin_unlock(&inode->i_lock);
1289
1290 dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1291 inode, capsnap, capsnap->follows, capsnap->flush_tid);
1292 send_cap_msg(session, ceph_vino(inode).ino, 0,
1293 CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1294 capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
1295 capsnap->size, 0,
1296 &capsnap->mtime, &capsnap->atime,
1297 capsnap->time_warp_seq,
1298 capsnap->uid, capsnap->gid, capsnap->mode,
1299 capsnap->xattr_version, capsnap->xattr_blob,
1300 capsnap->follows);
1301
1302 next_follows = capsnap->follows + 1;
1303 ceph_put_cap_snap(capsnap);
1304
1305 spin_lock(&inode->i_lock);
1306 goto retry;
1307 }
1308
1309 /* we flushed them all; remove this inode from the queue */
1310 spin_lock(&mdsc->snap_flush_lock);
1311 list_del_init(&ci->i_snap_flush_item);
1312 spin_unlock(&mdsc->snap_flush_lock);
1313
1314out:
1315 if (psession)
1316 *psession = session;
1317 else if (session) {
1318 mutex_unlock(&session->s_mutex);
1319 ceph_put_mds_session(session);
1320 }
1321}
1322
1323static void ceph_flush_snaps(struct ceph_inode_info *ci)
1324{
1325 struct inode *inode = &ci->vfs_inode;
1326
1327 spin_lock(&inode->i_lock);
1328 __ceph_flush_snaps(ci, NULL, 0);
1329 spin_unlock(&inode->i_lock);
1330}
1331
1332/*
1333 * Mark caps dirty. If inode is newly dirty, add to the global dirty
1334 * list.
1335 */
1336void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1337{
1338 struct ceph_mds_client *mdsc =
1339 &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
1340 struct inode *inode = &ci->vfs_inode;
1341 int was = ci->i_dirty_caps;
1342 int dirty = 0;
1343
1344 dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
1345 ceph_cap_string(mask), ceph_cap_string(was),
1346 ceph_cap_string(was | mask));
1347 ci->i_dirty_caps |= mask;
1348 if (was == 0) {
1349 if (!ci->i_head_snapc)
1350 ci->i_head_snapc = ceph_get_snap_context(
1351 ci->i_snap_realm->cached_context);
1352 dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
1353 ci->i_head_snapc);
1354 BUG_ON(!list_empty(&ci->i_dirty_item));
1355 spin_lock(&mdsc->cap_dirty_lock);
1356 list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1357 spin_unlock(&mdsc->cap_dirty_lock);
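 /*
  * Take an inode reference while the inode has dirty or flushing
  * caps; the reference is put once writeback of the caps completes.
  */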
1358 if (ci->i_flushing_caps == 0) {
1359 igrab(inode);
1360 dirty |= I_DIRTY_SYNC;
1361 }
1362 }
1363 BUG_ON(list_empty(&ci->i_dirty_item));
1364 if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
1365 (mask & CEPH_CAP_FILE_BUFFER))
1366 dirty |= I_DIRTY_DATASYNC;
1367 if (dirty)
1368 __mark_inode_dirty(inode, dirty);
1369 __cap_delay_requeue(mdsc, ci);
1370}
1371
1372/*
1373 * Add dirty inode to the flushing list. Assigned a seq number so we
1374 * can wait for caps to flush without starving.
1375 *
1376 * Called under i_lock.
1377 */
1378static int __mark_caps_flushing(struct inode *inode,
1379 struct ceph_mds_session *session)
1380{
1381 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
1382 struct ceph_inode_info *ci = ceph_inode(inode);
1383 int flushing;
1384
1385 BUG_ON(ci->i_dirty_caps == 0);
1386 BUG_ON(list_empty(&ci->i_dirty_item));
1387
1388 flushing = ci->i_dirty_caps;
1389 dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
1390 ceph_cap_string(flushing),
1391 ceph_cap_string(ci->i_flushing_caps),
1392 ceph_cap_string(ci->i_flushing_caps | flushing));
1393 ci->i_flushing_caps |= flushing;
1394 ci->i_dirty_caps = 0;
1395 dout(" inode %p now !dirty\n", inode);
1396
1397 spin_lock(&mdsc->cap_dirty_lock);
1398 list_del_init(&ci->i_dirty_item);
1399
1400 ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
1401 if (list_empty(&ci->i_flushing_item)) {
1402 list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1403 mdsc->num_cap_flushing++;
1404 dout(" inode %p now flushing seq %lld\n", inode,
1405 ci->i_cap_flush_seq);
1406 } else {
1407 list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1408 dout(" inode %p now flushing (more) seq %lld\n", inode,
1409 ci->i_cap_flush_seq);
1410 }
1411 spin_unlock(&mdsc->cap_dirty_lock);
1412
1413 return flushing;
1414}
1415
1416/*
1417 * try to invalidate mapping pages without blocking.
1418 */
1419static int mapping_is_empty(struct address_space *mapping)
1420{
1421 struct page *page = find_get_page(mapping, 0);
1422
1423 if (!page)
1424 return 1;
1425
1426 put_page(page);
1427 return 0;
1428}
1429
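/*
 * invalidate_mapping_pages() may sleep, so it cannot be called while
 * holding the i_lock spinlock; drop it, invalidate, then re-take it.
 * If i_rdcache_gen changed in the meantime, the caching cap was
 * re-issued and this invalidation no longer counts.
 */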
1430static int try_nonblocking_invalidate(struct inode *inode)
1431{
1432 struct ceph_inode_info *ci = ceph_inode(inode);
1433 u32 invalidating_gen = ci->i_rdcache_gen;
1434
1435 spin_unlock(&inode->i_lock);
1436 invalidate_mapping_pages(&inode->i_data, 0, -1);
1437 spin_lock(&inode->i_lock);
1438
1439 if (mapping_is_empty(&inode->i_data) &&
1440 invalidating_gen == ci->i_rdcache_gen) {
1441 /* success. */
1442 dout("try_nonblocking_invalidate %p success\n", inode);
1443 ci->i_rdcache_gen = 0;
1444 ci->i_rdcache_revoking = 0;
1445 return 0;
1446 }
1447 dout("try_nonblocking_invalidate %p failed\n", inode);
1448 return -1;
1449}
1450
1451/*
1452 * Swiss army knife function to examine currently used and wanted
1453 * versus held caps. Release, flush, ack revoked caps to mds as
1454 * appropriate.
1455 *
1456 * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1457 * cap release further.
1458 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
1459 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1460 * further delay.
1461 */
1462void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1463 struct ceph_mds_session *session)
1464{
1465 struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
1466 struct ceph_mds_client *mdsc = &client->mdsc;
1467 struct inode *inode = &ci->vfs_inode;
1468 struct ceph_cap *cap;
1469 int file_wanted, used;
1470 int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */
1471 int issued, implemented, want, retain, revoking, flushing = 0;
1472 int mds = -1; /* keep track of how far we've gone through i_caps list
1473 to avoid an infinite loop on retry */
1474 struct rb_node *p;
1475 int tried_invalidate = 0;
1476 int delayed = 0, sent = 0, force_requeue = 0, num;
1477 int queue_invalidate = 0;
1478 int is_delayed = flags & CHECK_CAPS_NODELAY;
1479
1480 /* if we are unmounting, flush any unused caps immediately. */
1481 if (mdsc->stopping)
1482 is_delayed = 1;
1483
1484 spin_lock(&inode->i_lock);
1485
1486 if (ci->i_ceph_flags & CEPH_I_FLUSH)
1487 flags |= CHECK_CAPS_FLUSH;
1488
1489 /* flush snaps first time around only */
1490 if (!list_empty(&ci->i_cap_snaps))
1491 __ceph_flush_snaps(ci, &session, 0);
1492 goto retry_locked;
1493retry:
1494 spin_lock(&inode->i_lock);
1495retry_locked:
1496 file_wanted = __ceph_caps_file_wanted(ci);
1497 used = __ceph_caps_used(ci);
1498 want = file_wanted | used;
1499 issued = __ceph_caps_issued(ci, &implemented);
1500 revoking = implemented & ~issued;
1501
1502 retain = want | CEPH_CAP_PIN;
1503 if (!mdsc->stopping && inode->i_nlink > 0) {
1504 if (want) {
1505 retain |= CEPH_CAP_ANY; /* be greedy */
1506 } else {
1507 retain |= CEPH_CAP_ANY_SHARED;
1508 /*
1509 * keep RD only if we didn't have the file open RW,
1510 * because then the mds would revoke it anyway to
1511 * journal max_size=0.
1512 */
1513 if (ci->i_max_size == 0)
1514 retain |= CEPH_CAP_ANY_RD;
1515 }
1516 }
1517
1518 dout("check_caps %p file_want %s used %s dirty %s flushing %s"
1519 " issued %s revoking %s retain %s %s%s%s\n", inode,
1520 ceph_cap_string(file_wanted),
1521 ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1522 ceph_cap_string(ci->i_flushing_caps),
1523 ceph_cap_string(issued), ceph_cap_string(revoking),
1524 ceph_cap_string(retain),
1525 (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1526 (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1527 (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1528
1529 /*
1530 * If we no longer need to hold onto old our caps, and we may
1531 * have cached pages, but don't want them, then try to invalidate.
1532 * If we fail, it's because pages are locked.... try again later.
1533 */
1534 if ((!is_delayed || mdsc->stopping) &&
1535 ci->i_wrbuffer_ref == 0 && /* no dirty pages... */
1536 ci->i_rdcache_gen && /* may have cached pages */
1537 (file_wanted == 0 || /* no open files */
1538 (revoking & (CEPH_CAP_FILE_CACHE|
1539 CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */
1540 !tried_invalidate) {
1541 dout("check_caps trying to invalidate on %p\n", inode);
1542 if (try_nonblocking_invalidate(inode) < 0) {
1543 if (revoking & (CEPH_CAP_FILE_CACHE|
1544 CEPH_CAP_FILE_LAZYIO)) {
1545 dout("check_caps queuing invalidate\n");
1546 queue_invalidate = 1;
1547 ci->i_rdcache_revoking = ci->i_rdcache_gen;
1548 } else {
1549 dout("check_caps failed to invalidate pages\n");
1550 /* we failed to invalidate pages. check these
1551 caps again later. */
1552 force_requeue = 1;
1553 __cap_set_timeouts(mdsc, ci);
1554 }
1555 }
1556 tried_invalidate = 1;
1557 goto retry_locked;
1558 }
1559
1560 num = 0;
1561 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1562 cap = rb_entry(p, struct ceph_cap, ci_node);
1563 num++;
1564
1565 /* avoid looping forever */
1566 if (mds >= cap->mds ||
1567 ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1568 continue;
1569
1570 /* NOTE: no side-effects allowed, until we take s_mutex */
1571
1572 revoking = cap->implemented & ~cap->issued;
1573 if (revoking)
1574 dout(" mds%d revoking %s\n", cap->mds,
1575 ceph_cap_string(revoking));
1576
1577 if (cap == ci->i_auth_cap &&
1578 (cap->issued & CEPH_CAP_FILE_WR)) {
1579 /* request larger max_size from MDS? */
1580 if (ci->i_wanted_max_size > ci->i_max_size &&
1581 ci->i_wanted_max_size > ci->i_requested_max_size) {
1582 dout("requesting new max_size\n");
1583 goto ack;
1584 }
1585
1586 /* approaching file_max? */
1587 if ((inode->i_size << 1) >= ci->i_max_size &&
1588 (ci->i_reported_size << 1) < ci->i_max_size) {
1589 dout("i_size approaching max_size\n");
1590 goto ack;
1591 }
1592 }
1593 /* flush anything dirty? */
1594 if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1595 ci->i_dirty_caps) {
1596 dout("flushing dirty caps\n");
1597 goto ack;
1598 }
1599
1600 /* completed revocation? going down and there are no caps? */
1601 if (revoking && (revoking & used) == 0) {
1602 dout("completed revocation of %s\n",
1603 ceph_cap_string(cap->implemented & ~cap->issued));
1604 goto ack;
1605 }
1606
1607 /* want more caps from mds? */
1608 if (want & ~(cap->mds_wanted | cap->issued))
1609 goto ack;
1610
1611 /* things we might delay */
1612 if ((cap->issued & ~retain) == 0 &&
1613 cap->mds_wanted == want)
1614 continue; /* nope, all good */
1615
1616 if (is_delayed)
1617 goto ack;
1618
1619 /* delay? */
1620 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1621 time_before(jiffies, ci->i_hold_caps_max)) {
1622 dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1623 ceph_cap_string(cap->issued),
1624 ceph_cap_string(cap->issued & retain),
1625 ceph_cap_string(cap->mds_wanted),
1626 ceph_cap_string(want));
1627 delayed++;
1628 continue;
1629 }
1630
1631ack:
1632 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1633 dout(" skipping %p I_NOFLUSH set\n", inode);
1634 continue;
1635 }
1636
1637 if (session && session != cap->session) {
1638 dout("oops, wrong session %p mutex\n", session);
1639 mutex_unlock(&session->s_mutex);
1640 session = NULL;
1641 }
1642 if (!session) {
1643 session = cap->session;
1644 if (mutex_trylock(&session->s_mutex) == 0) {
1645 dout("inverting session/ino locks on %p\n",
1646 session);
1647 spin_unlock(&inode->i_lock);
1648 if (took_snap_rwsem) {
1649 up_read(&mdsc->snap_rwsem);
1650 took_snap_rwsem = 0;
1651 }
1652 mutex_lock(&session->s_mutex);
1653 goto retry;
1654 }
1655 }
1656 /* take snap_rwsem after session mutex */
1657 if (!took_snap_rwsem) {
1658 if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1659 dout("inverting snap/in locks on %p\n",
1660 inode);
1661 spin_unlock(&inode->i_lock);
1662 down_read(&mdsc->snap_rwsem);
1663 took_snap_rwsem = 1;
1664 goto retry;
1665 }
1666 took_snap_rwsem = 1;
1667 }
1668
1669 if (cap == ci->i_auth_cap && ci->i_dirty_caps)
1670 flushing = __mark_caps_flushing(inode, session);
1671
1672 mds = cap->mds; /* remember mds, so we don't repeat */
1673 sent++;
1674
1675 /* __send_cap drops i_lock */
1676 delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
1677 retain, flushing, NULL);
1678 goto retry; /* retake i_lock and restart our cap scan. */
1679 }
1680
1681 /*
1682 * Reschedule delayed caps release if we delayed anything,
1683 * otherwise cancel.
1684 */
1685 if (delayed && is_delayed)
1686 force_requeue = 1; /* __send_cap delayed release; requeue */
1687 if (!delayed && !is_delayed)
1688 __cap_delay_cancel(mdsc, ci);
1689 else if (!is_delayed || force_requeue)
1690 __cap_delay_requeue(mdsc, ci);
1691
1692 spin_unlock(&inode->i_lock);
1693
1694 if (queue_invalidate)
1695 ceph_queue_invalidate(inode);
1696
1697 if (session)
1698 mutex_unlock(&session->s_mutex);
1699 if (took_snap_rwsem)
1700 up_read(&mdsc->snap_rwsem);
1701}
1702
1703/*
1704 * Try to flush dirty caps back to the auth mds.
1705 */
1706static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1707 unsigned *flush_tid)
1708{
1709 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
1710 struct ceph_inode_info *ci = ceph_inode(inode);
1711 int unlock_session = session ? 0 : 1;
1712 int flushing = 0;
1713
1714retry:
1715 spin_lock(&inode->i_lock);
1716 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1717 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1718 goto out;
1719 }
1720 if (ci->i_dirty_caps && ci->i_auth_cap) {
1721 struct ceph_cap *cap = ci->i_auth_cap;
1722 int used = __ceph_caps_used(ci);
1723 int want = __ceph_caps_wanted(ci);
1724 int delayed;
1725
1726 if (!session) {
1727 spin_unlock(&inode->i_lock);
1728 session = cap->session;
1729 mutex_lock(&session->s_mutex);
1730 goto retry;
1731 }
1732 BUG_ON(session != cap->session);
1733 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1734 goto out;
1735
1736 flushing = __mark_caps_flushing(inode, session);
1737
1738 /* __send_cap drops i_lock */
1739 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1740 cap->issued | cap->implemented, flushing,
1741 flush_tid);
1742 if (!delayed)
1743 goto out_unlocked;
1744
1745 spin_lock(&inode->i_lock);
1746 __cap_delay_requeue(mdsc, ci);
1747 }
1748out:
1749 spin_unlock(&inode->i_lock);
1750out_unlocked:
1751 if (session && unlock_session)
1752 mutex_unlock(&session->s_mutex);
1753 return flushing;
1754}
1755
1756/*
1757 * Return true if we've flushed caps through the given flush_tid.
1758 */
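/*
 * i_cap_flush_tid[] records, per cap bit, the tid of the most recent
 * flush that included that bit; a bit still flushing with a tid at or
 * below @tid means the flush we are waiting for has not been acked yet.
 */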
1759static int caps_are_flushed(struct inode *inode, unsigned tid)
1760{
1761 struct ceph_inode_info *ci = ceph_inode(inode);
1762 int i, ret = 1;
1763
1764 spin_lock(&inode->i_lock);
1765 for (i = 0; i < CEPH_CAP_BITS; i++)
1766 if ((ci->i_flushing_caps & (1 << i)) &&
1767 ci->i_cap_flush_tid[i] <= tid) {
1768 /* still flushing this bit */
1769 ret = 0;
1770 break;
1771 }
1772 spin_unlock(&inode->i_lock);
1773 return ret;
1774}
1775
1776/*
1777 * Wait on any unsafe replies for the given inode. First wait on the
1778 * newest request, and make that the upper bound. Then, if there are
1779 * more requests, keep waiting on the oldest as long as it is still older
1780 * than the original request.
1781 */
1782static void sync_write_wait(struct inode *inode)
1783{
1784 struct ceph_inode_info *ci = ceph_inode(inode);
1785 struct list_head *head = &ci->i_unsafe_writes;
1786 struct ceph_osd_request *req;
1787 u64 last_tid;
1788
1789 spin_lock(&ci->i_unsafe_lock);
1790 if (list_empty(head))
1791 goto out;
1792
1793 /* set upper bound as _last_ entry in chain */
1794 req = list_entry(head->prev, struct ceph_osd_request,
1795 r_unsafe_item);
1796 last_tid = req->r_tid;
1797
1798 do {
1799 ceph_osdc_get_request(req);
1800 spin_unlock(&ci->i_unsafe_lock);
1801 dout("sync_write_wait on tid %llu (until %llu)\n",
1802 req->r_tid, last_tid);
1803 wait_for_completion(&req->r_safe_completion);
1804 spin_lock(&ci->i_unsafe_lock);
1805 ceph_osdc_put_request(req);
1806
1807 /*
1808 * from here on look at first entry in chain, since we
1809 * only want to wait for anything older than last_tid
1810 */
1811 if (list_empty(head))
1812 break;
1813 req = list_entry(head->next, struct ceph_osd_request,
1814 r_unsafe_item);
1815 } while (req->r_tid < last_tid);
1816out:
1817 spin_unlock(&ci->i_unsafe_lock);
1818}
1819
7ea80859 1820int ceph_fsync(struct file *file, int datasync)
a8599bd8 1821{
7ea80859 1822 struct inode *inode = file->f_mapping->host;
a8599bd8
SW
1823 struct ceph_inode_info *ci = ceph_inode(inode);
1824 unsigned flush_tid;
1825 int ret;
1826 int dirty;
1827
1828 dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
1829 sync_write_wait(inode);
1830
1831 ret = filemap_write_and_wait(inode->i_mapping);
1832 if (ret < 0)
1833 return ret;
1834
1835 dirty = try_flush_caps(inode, NULL, &flush_tid);
1836 dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
1837
1838 /*
1839 * only wait on non-file metadata writeback (the mds
1840 * can recover size and mtime, so we don't need to
1841 * wait for that)
1842 */
1843 if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
1844 dout("fsync waiting for flush_tid %u\n", flush_tid);
1845 ret = wait_event_interruptible(ci->i_cap_wq,
1846 caps_are_flushed(inode, flush_tid));
1847 }
1848
1849 dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
1850 return ret;
1851}
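/*
 * Illustrative sketch, not part of caps.c: a minimal userspace program
 * that exercises the path above.  fsync(2)/fdatasync(2) reach
 * ceph_fsync(), which waits out unsafe OSD writes, flushes dirty pages,
 * and flushes dirty caps to the MDS.  The mount point and file name are
 * assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/ceph/testfile";  /* assumed CephFS mount */
	const char buf[] = "hello ceph\n";
	int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, sizeof(buf) - 1) < 0)
		perror("write");
	if (fsync(fd) < 0)      /* full flush: may wait on cap flush_tid */
		perror("fsync");
	if (fdatasync(fd) < 0)  /* datasync: skips the metadata cap wait */
		perror("fdatasync");
	close(fd);
	return 0;
}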
1852
1853/*
1854 * Flush any dirty caps back to the mds. If we aren't asked to wait,
1855 * queue inode for flush but don't do so immediately, because we can
1856 * get by with fewer MDS messages if we wait for data writeback to
1857 * complete first.
1858 */
f1a3d572 1859int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
a8599bd8
SW
1860{
1861 struct ceph_inode_info *ci = ceph_inode(inode);
1862 unsigned flush_tid;
1863 int err = 0;
1864 int dirty;
f1a3d572 1865 int wait = wbc->sync_mode == WB_SYNC_ALL;
a8599bd8
SW
1866
1867 dout("write_inode %p wait=%d\n", inode, wait);
1868 if (wait) {
1869 dirty = try_flush_caps(inode, NULL, &flush_tid);
1870 if (dirty)
1871 err = wait_event_interruptible(ci->i_cap_wq,
1872 caps_are_flushed(inode, flush_tid));
1873 } else {
640ef79d
CR
1874 struct ceph_mds_client *mdsc =
1875 &ceph_sb_to_client(inode->i_sb)->mdsc;
a8599bd8
SW
1876
1877 spin_lock(&inode->i_lock);
1878 if (__ceph_caps_dirty(ci))
1879 __cap_delay_requeue_front(mdsc, ci);
1880 spin_unlock(&inode->i_lock);
1881 }
1882 return err;
1883}
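/*
 * Illustrative sketch, not part of caps.c: ceph_write_inode() is assumed
 * to be wired up as the ->write_inode super operation (the real table
 * lives elsewhere in fs/ceph), which is how sync(2) and periodic
 * writeback reach the cap-flush logic above.
 */
static const struct super_operations example_super_ops = {
	.write_inode	= ceph_write_inode,
	/* other operations elided */
};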
1884
1885/*
1886 * After a recovering MDS goes active, we need to resend any caps
1887 * we were flushing.
1888 *
1889 * Caller holds session->s_mutex.
1890 */
1891static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1892 struct ceph_mds_session *session)
1893{
1894 struct ceph_cap_snap *capsnap;
1895
1896 dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
1897 list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
1898 flushing_item) {
1899 struct ceph_inode_info *ci = capsnap->ci;
1900 struct inode *inode = &ci->vfs_inode;
1901 struct ceph_cap *cap;
1902
1903 spin_lock(&inode->i_lock);
1904 cap = ci->i_auth_cap;
1905 if (cap && cap->session == session) {
1906 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
1907 cap, capsnap);
e835124c 1908 __ceph_flush_snaps(ci, &session, 1);
a8599bd8
SW
1909 } else {
1910 pr_err("%p auth cap %p not mds%d ???\n", inode,
1911 cap, session->s_mds);
a8599bd8 1912 }
0b0c06d1 1913 spin_unlock(&inode->i_lock);
a8599bd8
SW
1914 }
1915}
1916
1917void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1918 struct ceph_mds_session *session)
1919{
1920 struct ceph_inode_info *ci;
1921
1922 kick_flushing_capsnaps(mdsc, session);
1923
1924 dout("kick_flushing_caps mds%d\n", session->s_mds);
1925 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
1926 struct inode *inode = &ci->vfs_inode;
1927 struct ceph_cap *cap;
1928 int delayed = 0;
1929
1930 spin_lock(&inode->i_lock);
1931 cap = ci->i_auth_cap;
1932 if (cap && cap->session == session) {
1933 dout("kick_flushing_caps %p cap %p %s\n", inode,
1934 cap, ceph_cap_string(ci->i_flushing_caps));
1935 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1936 __ceph_caps_used(ci),
1937 __ceph_caps_wanted(ci),
1938 cap->issued | cap->implemented,
1939 ci->i_flushing_caps, NULL);
1940 if (delayed) {
1941 spin_lock(&inode->i_lock);
1942 __cap_delay_requeue(mdsc, ci);
1943 spin_unlock(&inode->i_lock);
1944 }
1945 } else {
1946 pr_err("%p auth cap %p not mds%d ???\n", inode,
1947 cap, session->s_mds);
1948 spin_unlock(&inode->i_lock);
1949 }
1950 }
1951}
1952
1953
1954/*
1955 * Take references to capabilities we hold, so that we don't release
1956 * them to the MDS prematurely.
1957 *
1958 * Protected by i_lock.
1959 */
1960static void __take_cap_refs(struct ceph_inode_info *ci, int got)
1961{
1962 if (got & CEPH_CAP_PIN)
1963 ci->i_pin_ref++;
1964 if (got & CEPH_CAP_FILE_RD)
1965 ci->i_rd_ref++;
1966 if (got & CEPH_CAP_FILE_CACHE)
1967 ci->i_rdcache_ref++;
1968 if (got & CEPH_CAP_FILE_WR)
1969 ci->i_wr_ref++;
1970 if (got & CEPH_CAP_FILE_BUFFER) {
1971 if (ci->i_wrbuffer_ref == 0)
1972 igrab(&ci->vfs_inode);
1973 ci->i_wrbuffer_ref++;
1974 dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
1975 &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
1976 }
1977}
1978
1979/*
1980 * Try to grab cap references. Specify those refs we @want, and the
1981 * minimal set we @need. Also include the larger offset we are writing
1982 * to (when applicable), and check against max_size here as well.
1983 * Note that caller is responsible for ensuring max_size increases are
1984 * requested from the MDS.
1985 */
1986static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
1987 int *got, loff_t endoff, int *check_max, int *err)
1988{
1989 struct inode *inode = &ci->vfs_inode;
1990 int ret = 0;
1991 int have, implemented;
195d3ce2 1992 int file_wanted;
a8599bd8
SW
1993
1994 dout("get_cap_refs %p need %s want %s\n", inode,
1995 ceph_cap_string(need), ceph_cap_string(want));
1996 spin_lock(&inode->i_lock);
1997
195d3ce2
SW
1998 /* make sure file is actually open */
1999 file_wanted = __ceph_caps_file_wanted(ci);
2000 if ((file_wanted & need) == 0) {
2001 dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2002 ceph_cap_string(need), ceph_cap_string(file_wanted));
a8599bd8
SW
2003 *err = -EBADF;
2004 ret = 1;
2005 goto out;
2006 }
2007
2008 if (need & CEPH_CAP_FILE_WR) {
2009 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2010 dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2011 inode, endoff, ci->i_max_size);
2012 if (endoff > ci->i_wanted_max_size) {
2013 *check_max = 1;
2014 ret = 1;
2015 }
2016 goto out;
2017 }
2018 /*
2019 * If a sync write is in progress, we must wait, so that we
2020 * can get a final snapshot value for size+mtime.
2021 */
2022 if (__ceph_have_pending_cap_snap(ci)) {
2023 dout("get_cap_refs %p cap_snap_pending\n", inode);
2024 goto out;
2025 }
2026 }
2027 have = __ceph_caps_issued(ci, &implemented);
2028
2029 /*
2030 * disallow writes while a truncate is pending
2031 */
2032 if (ci->i_truncate_pending)
2033 have &= ~CEPH_CAP_FILE_WR;
2034
2035 if ((have & need) == need) {
2036 /*
2037 * Look at (implemented & ~have & not) so that we keep waiting
2038 * on transition from wanted -> needed caps. This is needed
2039 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
2040 * going before a prior buffered writeback happens.
2041 */
2042 int not = want & ~(have & need);
2043 int revoking = implemented & ~have;
2044 dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2045 inode, ceph_cap_string(have), ceph_cap_string(not),
2046 ceph_cap_string(revoking));
2047 if ((revoking & not) == 0) {
2048 *got = need | (have & want);
2049 __take_cap_refs(ci, *got);
2050 ret = 1;
2051 }
2052 } else {
2053 dout("get_cap_refs %p have %s needed %s\n", inode,
2054 ceph_cap_string(have), ceph_cap_string(need));
2055 }
2056out:
2057 spin_unlock(&inode->i_lock);
2058 dout("get_cap_refs %p ret %d got %s\n", inode,
2059 ret, ceph_cap_string(*got));
2060 return ret;
2061}
2062
2063/*
2064 * Check the offset we are writing up to against our current
2065 * max_size. If necessary, tell the MDS we want to write to
2066 * a larger offset.
2067 */
2068static void check_max_size(struct inode *inode, loff_t endoff)
2069{
2070 struct ceph_inode_info *ci = ceph_inode(inode);
2071 int check = 0;
2072
2073 /* do we need to explicitly request a larger max_size? */
2074 spin_lock(&inode->i_lock);
2075 if ((endoff >= ci->i_max_size ||
2076 endoff > (inode->i_size << 1)) &&
2077 endoff > ci->i_wanted_max_size) {
2078 dout("write %p at large endoff %llu, req max_size\n",
2079 inode, endoff);
2080 ci->i_wanted_max_size = endoff;
2081 check = 1;
2082 }
2083 spin_unlock(&inode->i_lock);
2084 if (check)
2085 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2086}
2087
2088/*
2089 * Wait for caps, and take cap references. If we can't get a WR cap
2090 * due to a small max_size, make sure we check_max_size (and possibly
2091 * ask the mds) so we don't get hung up indefinitely.
2092 */
2093int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
2094 loff_t endoff)
2095{
2096 int check_max, ret, err;
2097
2098retry:
2099 if (endoff > 0)
2100 check_max_size(&ci->vfs_inode, endoff);
2101 check_max = 0;
2102 err = 0;
2103 ret = wait_event_interruptible(ci->i_cap_wq,
2104 try_get_cap_refs(ci, need, want,
2105 got, endoff,
2106 &check_max, &err));
2107 if (err)
2108 ret = err;
2109 if (check_max)
2110 goto retry;
2111 return ret;
2112}
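/*
 * Illustrative sketch, not part of caps.c: how a file read path might
 * bracket I/O with ceph_get_caps()/ceph_put_cap_refs(), loosely modeled
 * on the fs/ceph file code of this era.  The do_read() helper and the
 * simplified error handling are assumptions.
 */
static ssize_t example_read(struct inode *inode, char __user *buf,
			    size_t len, loff_t *ppos)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int want = CEPH_CAP_FILE_CACHE;  /* nice to have: cached reads */
	int got = 0;
	ssize_t ret;

	/* block until we hold at least FILE_RD; endoff -1: not a write */
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		return ret;

	ret = do_read(inode, buf, len, ppos, got);  /* assumed helper */

	ceph_put_cap_refs(ci, got);  /* drop the refs taken above */
	return ret;
}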
2113
2114/*
2115 * Take cap refs. Caller must already know we hold at least one ref
2116 * on the caps in question or we don't know this is safe.
2117 */
2118void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2119{
2120 spin_lock(&ci->vfs_inode.i_lock);
2121 __take_cap_refs(ci, caps);
2122 spin_unlock(&ci->vfs_inode.i_lock);
2123}
2124
2125/*
2126 * Release cap refs.
2127 *
2128 * If we released the last ref on any given cap, call ceph_check_caps
2129 * to release (or schedule a release).
2130 *
2131 * If we are releasing a WR cap (from a sync write), finalize any affected
2132 * cap_snap, and wake up any waiters.
2133 */
2134void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2135{
2136 struct inode *inode = &ci->vfs_inode;
2137 int last = 0, put = 0, flushsnaps = 0, wake = 0;
2138 struct ceph_cap_snap *capsnap;
2139
2140 spin_lock(&inode->i_lock);
2141 if (had & CEPH_CAP_PIN)
2142 --ci->i_pin_ref;
2143 if (had & CEPH_CAP_FILE_RD)
2144 if (--ci->i_rd_ref == 0)
2145 last++;
2146 if (had & CEPH_CAP_FILE_CACHE)
2147 if (--ci->i_rdcache_ref == 0)
2148 last++;
2149 if (had & CEPH_CAP_FILE_BUFFER) {
2150 if (--ci->i_wrbuffer_ref == 0) {
2151 last++;
2152 put++;
2153 }
2154 dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
2155 inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
2156 }
2157 if (had & CEPH_CAP_FILE_WR)
2158 if (--ci->i_wr_ref == 0) {
2159 last++;
2160 if (!list_empty(&ci->i_cap_snaps)) {
2161 capsnap = list_first_entry(&ci->i_cap_snaps,
2162 struct ceph_cap_snap,
2163 ci_item);
2164 if (capsnap->writing) {
2165 capsnap->writing = 0;
2166 flushsnaps =
2167 __ceph_finish_cap_snap(ci,
2168 capsnap);
2169 wake = 1;
2170 }
2171 }
2172 }
2173 spin_unlock(&inode->i_lock);
2174
819ccbfa
SW
2175 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2176 last ? " last" : "", put ? " put" : "");
a8599bd8
SW
2177
2178 if (last && !flushsnaps)
2179 ceph_check_caps(ci, 0, NULL);
2180 else if (flushsnaps)
2181 ceph_flush_snaps(ci);
2182 if (wake)
03066f23 2183 wake_up_all(&ci->i_cap_wq);
a8599bd8
SW
2184 if (put)
2185 iput(inode);
2186}
2187
2188/*
2189 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2190 * context. Adjust per-snap dirty page accounting as appropriate.
2191 * Once all dirty data for a cap_snap is flushed, flush snapped file
2192 * metadata back to the MDS. If we dropped the last ref, call
2193 * ceph_check_caps.
2194 */
2195void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2196 struct ceph_snap_context *snapc)
2197{
2198 struct inode *inode = &ci->vfs_inode;
2199 int last = 0;
819ccbfa
SW
2200 int complete_capsnap = 0;
2201 int drop_capsnap = 0;
a8599bd8
SW
2202 int found = 0;
2203 struct ceph_cap_snap *capsnap = NULL;
2204
2205 spin_lock(&inode->i_lock);
2206 ci->i_wrbuffer_ref -= nr;
2207 last = !ci->i_wrbuffer_ref;
2208
2209 if (ci->i_head_snapc == snapc) {
2210 ci->i_wrbuffer_ref_head -= nr;
7d8cb26d
SW
2211 if (ci->i_wrbuffer_ref_head == 0 &&
2212 ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
2213 BUG_ON(!ci->i_head_snapc);
a8599bd8
SW
2214 ceph_put_snap_context(ci->i_head_snapc);
2215 ci->i_head_snapc = NULL;
2216 }
2217 dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2218 inode,
2219 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2220 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2221 last ? " LAST" : "");
2222 } else {
2223 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2224 if (capsnap->context == snapc) {
2225 found = 1;
a8599bd8
SW
2226 break;
2227 }
2228 }
2229 BUG_ON(!found);
819ccbfa
SW
2230 capsnap->dirty_pages -= nr;
2231 if (capsnap->dirty_pages == 0) {
2232 complete_capsnap = 1;
2233 if (capsnap->dirty == 0)
2234 /* cap writeback completed before we created
2235 * the cap_snap; no FLUSHSNAP is needed */
2236 drop_capsnap = 1;
2237 }
a8599bd8 2238 dout("put_wrbuffer_cap_refs on %p cap_snap %p "
819ccbfa 2239 " snap %lld %d/%d -> %d/%d %s%s%s\n",
a8599bd8
SW
2240 inode, capsnap, capsnap->context->seq,
2241 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2242 ci->i_wrbuffer_ref, capsnap->dirty_pages,
2243 last ? " (wrbuffer last)" : "",
819ccbfa
SW
2244 complete_capsnap ? " (complete capsnap)" : "",
2245 drop_capsnap ? " (drop capsnap)" : "");
2246 if (drop_capsnap) {
2247 ceph_put_snap_context(capsnap->context);
2248 list_del(&capsnap->ci_item);
2249 list_del(&capsnap->flushing_item);
2250 ceph_put_cap_snap(capsnap);
2251 }
a8599bd8
SW
2252 }
2253
2254 spin_unlock(&inode->i_lock);
2255
2256 if (last) {
2257 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2258 iput(inode);
819ccbfa 2259 } else if (complete_capsnap) {
a8599bd8 2260 ceph_flush_snaps(ci);
03066f23 2261 wake_up_all(&ci->i_cap_wq);
a8599bd8 2262 }
819ccbfa
SW
2263 if (drop_capsnap)
2264 iput(inode);
a8599bd8
SW
2265}
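/*
 * Illustrative sketch, not part of caps.c: the buffered-writeback
 * completion path (in fs/ceph/addr.c in this era) is assumed to drop
 * its WRBUFFER references roughly like this once the OSD write for a
 * batch of dirty pages finishes.  The function name and arguments here
 * are simplified assumptions.
 */
static void example_writepages_done(struct inode *inode, int num_pages,
				    struct ceph_snap_context *snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	/* one WRBUFFER ref per page written under this snap context */
	ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc);
}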
2266
2267/*
2268 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2269 * actually be a revocation if it specifies a smaller cap set.)
2270 *
15637c8b
SW
2271 * caller holds s_mutex and i_lock, we drop both.
2272 *
a8599bd8
SW
 2273 * check_caps (acted on before returning, not a return value):
 2274 * 0 - nothing further to do
 2275 * 1 - check_caps on auth cap only (writeback)
 2276 * 2 - check_caps on all caps (ack revoke)
2277 */
15637c8b
SW
2278static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2279 struct ceph_mds_session *session,
2280 struct ceph_cap *cap,
2281 struct ceph_buffer *xattr_buf)
cd84db6e 2282 __releases(inode->i_lock)
a8599bd8
SW
2283{
2284 struct ceph_inode_info *ci = ceph_inode(inode);
2285 int mds = session->s_mds;
d91f2438
SW
2286 unsigned seq = le32_to_cpu(grant->seq);
2287 unsigned issue_seq = le32_to_cpu(grant->issue_seq);
a8599bd8
SW
2288 int newcaps = le32_to_cpu(grant->caps);
2289 int issued, implemented, used, wanted, dirty;
2290 u64 size = le64_to_cpu(grant->size);
2291 u64 max_size = le64_to_cpu(grant->max_size);
2292 struct timespec mtime, atime, ctime;
15637c8b 2293 int check_caps = 0;
a8599bd8
SW
2294 int wake = 0;
2295 int writeback = 0;
2296 int revoked_rdcache = 0;
3c6f6b79 2297 int queue_invalidate = 0;
a8599bd8 2298
d91f2438
SW
2299 dout("handle_cap_grant inode %p cap %p mds%d seq %u/%u %s\n",
2300 inode, cap, mds, seq, issue_seq, ceph_cap_string(newcaps));
a8599bd8
SW
2301 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2302 inode->i_size);
2303
2304 /*
2305 * If CACHE is being revoked, and we have no dirty buffers,
2306 * try to invalidate (once). (If there are dirty buffers, we
2307 * will invalidate _after_ writeback.)
2308 */
3b454c49
SW
2309 if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2310 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
bcd2cbd1 2311 !ci->i_wrbuffer_ref) {
5ecad6fd
SW
2312 if (try_nonblocking_invalidate(inode) == 0) {
2313 revoked_rdcache = 1;
2314 } else {
a8599bd8
SW
2315 /* there were locked pages.. invalidate later
2316 in a separate thread. */
2317 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
3c6f6b79 2318 queue_invalidate = 1;
a8599bd8
SW
2319 ci->i_rdcache_revoking = ci->i_rdcache_gen;
2320 }
a8599bd8 2321 }
a8599bd8
SW
2322 }
2323
2324 /* side effects now are allowed */
2325
2326 issued = __ceph_caps_issued(ci, &implemented);
2327 issued |= implemented | __ceph_caps_dirty(ci);
2328
685f9a5d 2329 cap->cap_gen = session->s_cap_gen;
a8599bd8
SW
2330
2331 __check_cap_issue(ci, cap, newcaps);
2332
2333 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
2334 inode->i_mode = le32_to_cpu(grant->mode);
2335 inode->i_uid = le32_to_cpu(grant->uid);
2336 inode->i_gid = le32_to_cpu(grant->gid);
2337 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2338 inode->i_uid, inode->i_gid);
2339 }
2340
2341 if ((issued & CEPH_CAP_LINK_EXCL) == 0)
2342 inode->i_nlink = le32_to_cpu(grant->nlink);
2343
2344 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2345 int len = le32_to_cpu(grant->xattr_len);
2346 u64 version = le64_to_cpu(grant->xattr_version);
2347
2348 if (version > ci->i_xattrs.version) {
2349 dout(" got new xattrs v%llu on %p len %d\n",
2350 version, inode, len);
2351 if (ci->i_xattrs.blob)
2352 ceph_buffer_put(ci->i_xattrs.blob);
2353 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2354 ci->i_xattrs.version = version;
2355 }
2356 }
2357
2358 /* size/ctime/mtime/atime? */
2359 ceph_fill_file_size(inode, issued,
2360 le32_to_cpu(grant->truncate_seq),
2361 le64_to_cpu(grant->truncate_size), size);
2362 ceph_decode_timespec(&mtime, &grant->mtime);
2363 ceph_decode_timespec(&atime, &grant->atime);
2364 ceph_decode_timespec(&ctime, &grant->ctime);
2365 ceph_fill_file_time(inode, issued,
2366 le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
2367 &atime);
2368
2369 /* max size increase? */
2370 if (max_size != ci->i_max_size) {
2371 dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
2372 ci->i_max_size = max_size;
2373 if (max_size >= ci->i_wanted_max_size) {
2374 ci->i_wanted_max_size = 0; /* reset */
2375 ci->i_requested_max_size = 0;
2376 }
2377 wake = 1;
2378 }
2379
2380 /* check cap bits */
2381 wanted = __ceph_caps_wanted(ci);
2382 used = __ceph_caps_used(ci);
2383 dirty = __ceph_caps_dirty(ci);
2384 dout(" my wanted = %s, used = %s, dirty %s\n",
2385 ceph_cap_string(wanted),
2386 ceph_cap_string(used),
2387 ceph_cap_string(dirty));
2388 if (wanted != le32_to_cpu(grant->wanted)) {
2389 dout("mds wanted %s -> %s\n",
2390 ceph_cap_string(le32_to_cpu(grant->wanted)),
2391 ceph_cap_string(wanted));
2392 grant->wanted = cpu_to_le32(wanted);
2393 }
2394
2395 cap->seq = seq;
d91f2438 2396 cap->issue_seq = issue_seq;
a8599bd8
SW
2397
2398 /* file layout may have changed */
2399 ci->i_layout = grant->layout;
2400
2401 /* revocation, grant, or no-op? */
2402 if (cap->issued & ~newcaps) {
3b454c49
SW
2403 int revoking = cap->issued & ~newcaps;
2404
2405 dout("revocation: %s -> %s (revoking %s)\n",
2406 ceph_cap_string(cap->issued),
2407 ceph_cap_string(newcaps),
2408 ceph_cap_string(revoking));
0eb6cd49 2409 if (revoking & used & CEPH_CAP_FILE_BUFFER)
3b454c49
SW
2410 writeback = 1; /* initiate writeback; will delay ack */
2411 else if (revoking == CEPH_CAP_FILE_CACHE &&
2412 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2413 queue_invalidate)
2414 ; /* do nothing yet, invalidation will be queued */
2415 else if (cap == ci->i_auth_cap)
2416 check_caps = 1; /* check auth cap only */
2417 else
2418 check_caps = 2; /* check all caps */
a8599bd8 2419 cap->issued = newcaps;
978097c9 2420 cap->implemented |= newcaps;
a8599bd8
SW
2421 } else if (cap->issued == newcaps) {
2422 dout("caps unchanged: %s -> %s\n",
2423 ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2424 } else {
2425 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2426 ceph_cap_string(newcaps));
2427 cap->issued = newcaps;
2428 cap->implemented |= newcaps; /* add bits only, to
2429 * avoid stepping on a
2430 * pending revocation */
2431 wake = 1;
2432 }
978097c9 2433 BUG_ON(cap->issued & ~cap->implemented);
a8599bd8
SW
2434
2435 spin_unlock(&inode->i_lock);
3c6f6b79 2436 if (writeback)
a8599bd8
SW
2437 /*
2438 * queue inode for writeback: we can't actually call
2439 * filemap_write_and_wait, etc. from message handler
2440 * context.
2441 */
3c6f6b79
SW
2442 ceph_queue_writeback(inode);
2443 if (queue_invalidate)
2444 ceph_queue_invalidate(inode);
a8599bd8 2445 if (wake)
03066f23 2446 wake_up_all(&ci->i_cap_wq);
15637c8b
SW
2447
2448 if (check_caps == 1)
2449 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2450 session);
2451 else if (check_caps == 2)
2452 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
2453 else
2454 mutex_unlock(&session->s_mutex);
a8599bd8
SW
2455}
2456
2457/*
2458 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
2459 * MDS has been safely committed.
2460 */
6df058c0 2461static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
a8599bd8
SW
2462 struct ceph_mds_caps *m,
2463 struct ceph_mds_session *session,
2464 struct ceph_cap *cap)
2465 __releases(inode->i_lock)
2466{
2467 struct ceph_inode_info *ci = ceph_inode(inode);
640ef79d 2468 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
a8599bd8
SW
2469 unsigned seq = le32_to_cpu(m->seq);
2470 int dirty = le32_to_cpu(m->dirty);
2471 int cleaned = 0;
afcdaea3 2472 int drop = 0;
a8599bd8
SW
2473 int i;
2474
2475 for (i = 0; i < CEPH_CAP_BITS; i++)
2476 if ((dirty & (1 << i)) &&
2477 flush_tid == ci->i_cap_flush_tid[i])
2478 cleaned |= 1 << i;
2479
2480 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
2481 " flushing %s -> %s\n",
2482 inode, session->s_mds, seq, ceph_cap_string(dirty),
2483 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
2484 ceph_cap_string(ci->i_flushing_caps & ~cleaned));
2485
2486 if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
2487 goto out;
2488
a8599bd8 2489 ci->i_flushing_caps &= ~cleaned;
a8599bd8
SW
2490
2491 spin_lock(&mdsc->cap_dirty_lock);
2492 if (ci->i_flushing_caps == 0) {
2493 list_del_init(&ci->i_flushing_item);
2494 if (!list_empty(&session->s_cap_flushing))
2495 dout(" mds%d still flushing cap on %p\n",
2496 session->s_mds,
2497 &list_entry(session->s_cap_flushing.next,
2498 struct ceph_inode_info,
2499 i_flushing_item)->vfs_inode);
2500 mdsc->num_cap_flushing--;
03066f23 2501 wake_up_all(&mdsc->cap_flushing_wq);
a8599bd8 2502 dout(" inode %p now !flushing\n", inode);
afcdaea3
SW
2503
2504 if (ci->i_dirty_caps == 0) {
2505 dout(" inode %p now clean\n", inode);
2506 BUG_ON(!list_empty(&ci->i_dirty_item));
2507 drop = 1;
7d8cb26d
SW
2508 if (ci->i_wrbuffer_ref_head == 0) {
2509 BUG_ON(!ci->i_head_snapc);
2510 ceph_put_snap_context(ci->i_head_snapc);
2511 ci->i_head_snapc = NULL;
2512 }
76e3b390
SW
2513 } else {
2514 BUG_ON(list_empty(&ci->i_dirty_item));
afcdaea3 2515 }
a8599bd8
SW
2516 }
2517 spin_unlock(&mdsc->cap_dirty_lock);
03066f23 2518 wake_up_all(&ci->i_cap_wq);
a8599bd8
SW
2519
2520out:
2521 spin_unlock(&inode->i_lock);
afcdaea3 2522 if (drop)
a8599bd8
SW
2523 iput(inode);
2524}
2525
2526/*
2527 * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
2528 * throw away our cap_snap.
2529 *
 2530 * Caller holds s_mutex.
2531 */
6df058c0 2532static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
a8599bd8
SW
2533 struct ceph_mds_caps *m,
2534 struct ceph_mds_session *session)
2535{
2536 struct ceph_inode_info *ci = ceph_inode(inode);
2537 u64 follows = le64_to_cpu(m->snap_follows);
a8599bd8
SW
2538 struct ceph_cap_snap *capsnap;
2539 int drop = 0;
2540
2541 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2542 inode, ci, session->s_mds, follows);
2543
2544 spin_lock(&inode->i_lock);
2545 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2546 if (capsnap->follows == follows) {
2547 if (capsnap->flush_tid != flush_tid) {
2548 dout(" cap_snap %p follows %lld tid %lld !="
2549 " %lld\n", capsnap, follows,
2550 flush_tid, capsnap->flush_tid);
2551 break;
2552 }
2553 WARN_ON(capsnap->dirty_pages || capsnap->writing);
819ccbfa
SW
2554 dout(" removing %p cap_snap %p follows %lld\n",
2555 inode, capsnap, follows);
a8599bd8
SW
2556 ceph_put_snap_context(capsnap->context);
2557 list_del(&capsnap->ci_item);
2558 list_del(&capsnap->flushing_item);
2559 ceph_put_cap_snap(capsnap);
2560 drop = 1;
2561 break;
2562 } else {
2563 dout(" skipping cap_snap %p follows %lld\n",
2564 capsnap, capsnap->follows);
2565 }
2566 }
2567 spin_unlock(&inode->i_lock);
2568 if (drop)
2569 iput(inode);
2570}
2571
2572/*
2573 * Handle TRUNC from MDS, indicating file truncation.
2574 *
 2575 * caller holds s_mutex.
2576 */
2577static void handle_cap_trunc(struct inode *inode,
2578 struct ceph_mds_caps *trunc,
2579 struct ceph_mds_session *session)
2580 __releases(inode->i_lock)
2581{
2582 struct ceph_inode_info *ci = ceph_inode(inode);
2583 int mds = session->s_mds;
2584 int seq = le32_to_cpu(trunc->seq);
2585 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2586 u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2587 u64 size = le64_to_cpu(trunc->size);
2588 int implemented = 0;
2589 int dirty = __ceph_caps_dirty(ci);
2590 int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
2591 int queue_trunc = 0;
2592
2593 issued |= implemented | dirty;
2594
2595 dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2596 inode, mds, seq, truncate_size, truncate_seq);
2597 queue_trunc = ceph_fill_file_size(inode, issued,
2598 truncate_seq, truncate_size, size);
2599 spin_unlock(&inode->i_lock);
2600
2601 if (queue_trunc)
3c6f6b79 2602 ceph_queue_vmtruncate(inode);
a8599bd8
SW
2603}
2604
2605/*
2606 * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a
2607 * different one. If we are the most recent migration we've seen (as
2608 * indicated by mseq), make note of the migrating cap bits for the
2609 * duration (until we see the corresponding IMPORT).
2610 *
2611 * caller holds s_mutex
2612 */
2613static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
154f42c2
SW
2614 struct ceph_mds_session *session,
2615 int *open_target_sessions)
a8599bd8
SW
2616{
2617 struct ceph_inode_info *ci = ceph_inode(inode);
2618 int mds = session->s_mds;
2619 unsigned mseq = le32_to_cpu(ex->migrate_seq);
2620 struct ceph_cap *cap = NULL, *t;
2621 struct rb_node *p;
2622 int remember = 1;
2623
2624 dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2625 inode, ci, mds, mseq);
2626
2627 spin_lock(&inode->i_lock);
2628
2629 /* make sure we haven't seen a higher mseq */
2630 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
2631 t = rb_entry(p, struct ceph_cap, ci_node);
2632 if (ceph_seq_cmp(t->mseq, mseq) > 0) {
2633 dout(" higher mseq on cap from mds%d\n",
2634 t->session->s_mds);
2635 remember = 0;
2636 }
2637 if (t->session->s_mds == mds)
2638 cap = t;
2639 }
2640
2641 if (cap) {
2642 if (remember) {
2643 /* make note */
2644 ci->i_cap_exporting_mds = mds;
2645 ci->i_cap_exporting_mseq = mseq;
2646 ci->i_cap_exporting_issued = cap->issued;
154f42c2
SW
2647
2648 /*
2649 * make sure we have open sessions with all possible
2650 * export targets, so that we get the matching IMPORT
2651 */
2652 *open_target_sessions = 1;
a8599bd8 2653 }
7c1332b8 2654 __ceph_remove_cap(cap);
a8599bd8 2655 }
4ea0043a 2656 /* else, we already released it */
a8599bd8
SW
2657
2658 spin_unlock(&inode->i_lock);
2659}
2660
2661/*
2662 * Handle cap IMPORT. If there are temp bits from an older EXPORT,
2663 * clean them up.
2664 *
2665 * caller holds s_mutex.
2666 */
2667static void handle_cap_import(struct ceph_mds_client *mdsc,
2668 struct inode *inode, struct ceph_mds_caps *im,
2669 struct ceph_mds_session *session,
2670 void *snaptrace, int snaptrace_len)
2671{
2672 struct ceph_inode_info *ci = ceph_inode(inode);
2673 int mds = session->s_mds;
2674 unsigned issued = le32_to_cpu(im->caps);
2675 unsigned wanted = le32_to_cpu(im->wanted);
2676 unsigned seq = le32_to_cpu(im->seq);
2677 unsigned mseq = le32_to_cpu(im->migrate_seq);
2678 u64 realmino = le64_to_cpu(im->realm);
2679 u64 cap_id = le64_to_cpu(im->cap_id);
2680
2681 if (ci->i_cap_exporting_mds >= 0 &&
2682 ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
2683 dout("handle_cap_import inode %p ci %p mds%d mseq %d"
2684 " - cleared exporting from mds%d\n",
2685 inode, ci, mds, mseq,
2686 ci->i_cap_exporting_mds);
2687 ci->i_cap_exporting_issued = 0;
2688 ci->i_cap_exporting_mseq = 0;
2689 ci->i_cap_exporting_mds = -1;
2690 } else {
2691 dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
2692 inode, ci, mds, mseq);
2693 }
2694
2695 down_write(&mdsc->snap_rwsem);
2696 ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
2697 false);
2698 downgrade_write(&mdsc->snap_rwsem);
2699 ceph_add_cap(inode, session, cap_id, -1,
2700 issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
2701 NULL /* no caps context */);
2702 try_flush_caps(inode, session, NULL);
2703 up_read(&mdsc->snap_rwsem);
2704}
2705
2706/*
2707 * Handle a caps message from the MDS.
2708 *
2709 * Identify the appropriate session, inode, and call the right handler
2710 * based on the cap op.
2711 */
2712void ceph_handle_caps(struct ceph_mds_session *session,
2713 struct ceph_msg *msg)
2714{
2715 struct ceph_mds_client *mdsc = session->s_mdsc;
2716 struct super_block *sb = mdsc->client->sb;
2717 struct inode *inode;
2718 struct ceph_cap *cap;
2719 struct ceph_mds_caps *h;
2600d2dd 2720 int mds = session->s_mds;
a8599bd8 2721 int op;
3d7ded4d 2722 u32 seq, mseq;
a8599bd8
SW
2723 struct ceph_vino vino;
2724 u64 cap_id;
2725 u64 size, max_size;
6df058c0 2726 u64 tid;
70edb55b 2727 void *snaptrace;
ce1fbc8d
SW
2728 size_t snaptrace_len;
2729 void *flock;
2730 u32 flock_len;
154f42c2 2731 int open_target_sessions = 0;
a8599bd8
SW
2732
2733 dout("handle_caps from mds%d\n", mds);
2734
2735 /* decode */
6df058c0 2736 tid = le64_to_cpu(msg->hdr.tid);
a8599bd8
SW
2737 if (msg->front.iov_len < sizeof(*h))
2738 goto bad;
2739 h = msg->front.iov_base;
2740 op = le32_to_cpu(h->op);
2741 vino.ino = le64_to_cpu(h->ino);
2742 vino.snap = CEPH_NOSNAP;
2743 cap_id = le64_to_cpu(h->cap_id);
2744 seq = le32_to_cpu(h->seq);
3d7ded4d 2745 mseq = le32_to_cpu(h->migrate_seq);
a8599bd8
SW
2746 size = le64_to_cpu(h->size);
2747 max_size = le64_to_cpu(h->max_size);
2748
ce1fbc8d
SW
2749 snaptrace = h + 1;
2750 snaptrace_len = le32_to_cpu(h->snap_trace_len);
2751
2752 if (le16_to_cpu(msg->hdr.version) >= 2) {
2753 void *p, *end;
2754
2755 p = snaptrace + snaptrace_len;
2756 end = msg->front.iov_base + msg->front.iov_len;
2757 ceph_decode_32_safe(&p, end, flock_len, bad);
2758 flock = p;
2759 } else {
2760 flock = NULL;
2761 flock_len = 0;
2762 }
2763
a8599bd8
SW
2764 mutex_lock(&session->s_mutex);
2765 session->s_seq++;
2766 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
2767 (unsigned)seq);
2768
2769 /* lookup ino */
2770 inode = ceph_find_inode(sb, vino);
2771 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
2772 vino.snap, inode);
2773 if (!inode) {
2774 dout(" i don't have ino %llx\n", vino.ino);
3d7ded4d
SW
2775
2776 if (op == CEPH_CAP_OP_IMPORT)
2777 __queue_cap_release(session, vino.ino, cap_id,
2778 mseq, seq);
21b559de 2779 goto flush_cap_releases;
a8599bd8
SW
2780 }
2781
2782 /* these will work even if we don't have a cap yet */
2783 switch (op) {
2784 case CEPH_CAP_OP_FLUSHSNAP_ACK:
6df058c0 2785 handle_cap_flushsnap_ack(inode, tid, h, session);
a8599bd8
SW
2786 goto done;
2787
2788 case CEPH_CAP_OP_EXPORT:
154f42c2 2789 handle_cap_export(inode, h, session, &open_target_sessions);
a8599bd8
SW
2790 goto done;
2791
2792 case CEPH_CAP_OP_IMPORT:
2793 handle_cap_import(mdsc, inode, h, session,
ce1fbc8d 2794 snaptrace, snaptrace_len);
15637c8b
SW
2795 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
2796 session);
2797 goto done_unlocked;
a8599bd8
SW
2798 }
2799
2800 /* the rest require a cap */
2801 spin_lock(&inode->i_lock);
2802 cap = __get_cap_for_mds(ceph_inode(inode), mds);
2803 if (!cap) {
9dbd412f 2804 dout(" no cap on %p ino %llx.%llx from mds%d\n",
a8599bd8
SW
2805 inode, ceph_ino(inode), ceph_snap(inode), mds);
2806 spin_unlock(&inode->i_lock);
21b559de 2807 goto flush_cap_releases;
a8599bd8
SW
2808 }
2809
2810 /* note that each of these drops i_lock for us */
2811 switch (op) {
2812 case CEPH_CAP_OP_REVOKE:
2813 case CEPH_CAP_OP_GRANT:
15637c8b
SW
2814 handle_cap_grant(inode, h, session, cap, msg->middle);
2815 goto done_unlocked;
a8599bd8
SW
2816
2817 case CEPH_CAP_OP_FLUSH_ACK:
6df058c0 2818 handle_cap_flush_ack(inode, tid, h, session, cap);
a8599bd8
SW
2819 break;
2820
2821 case CEPH_CAP_OP_TRUNC:
2822 handle_cap_trunc(inode, h, session);
2823 break;
2824
2825 default:
2826 spin_unlock(&inode->i_lock);
2827 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
2828 ceph_cap_op_name(op));
2829 }
2830
21b559de
GF
2831 goto done;
2832
2833flush_cap_releases:
2834 /*
2835 * send any full release message to try to move things
2836 * along for the mds (who clearly thinks we still have this
2837 * cap).
2838 */
2839 ceph_add_cap_releases(mdsc, session);
2840 ceph_send_cap_releases(mdsc, session);
2841
a8599bd8 2842done:
15637c8b
SW
2843 mutex_unlock(&session->s_mutex);
2844done_unlocked:
a8599bd8
SW
2845 if (inode)
2846 iput(inode);
154f42c2
SW
2847 if (open_target_sessions)
2848 ceph_mdsc_open_export_target_sessions(mdsc, session);
a8599bd8
SW
2849 return;
2850
2851bad:
2852 pr_err("ceph_handle_caps: corrupt message\n");
9ec7cab1 2853 ceph_msg_dump(msg);
a8599bd8
SW
2854 return;
2855}
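/*
 * Illustrative sketch, not part of caps.c: roughly how the MDS client
 * dispatcher is assumed to route an incoming CEPH_MSG_CLIENT_CAPS
 * message to ceph_handle_caps() (the real switch lives in
 * fs/ceph/mds_client.c and works from the connection, not the session).
 */
static void example_dispatch(struct ceph_mds_session *session,
			     struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(session, msg);  /* grants, revokes, acks */
		break;
	default:
		pr_err("example_dispatch unknown message type %d\n", type);
		break;
	}
}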
2856
2857/*
2858 * Delayed work handler to process end of delayed cap release LRU list.
2859 */
afcdaea3 2860void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
a8599bd8
SW
2861{
2862 struct ceph_inode_info *ci;
2863 int flags = CHECK_CAPS_NODELAY;
2864
a8599bd8
SW
2865 dout("check_delayed_caps\n");
2866 while (1) {
2867 spin_lock(&mdsc->cap_delay_lock);
2868 if (list_empty(&mdsc->cap_delay_list))
2869 break;
2870 ci = list_first_entry(&mdsc->cap_delay_list,
2871 struct ceph_inode_info,
2872 i_cap_delay_list);
2873 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
2874 time_before(jiffies, ci->i_hold_caps_max))
2875 break;
2876 list_del_init(&ci->i_cap_delay_list);
2877 spin_unlock(&mdsc->cap_delay_lock);
2878 dout("check_delayed_caps on %p\n", &ci->vfs_inode);
2879 ceph_check_caps(ci, flags, NULL);
2880 }
2881 spin_unlock(&mdsc->cap_delay_lock);
2882}
2883
afcdaea3
SW
2884/*
2885 * Flush all dirty caps to the mds
2886 */
2887void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2888{
e9964c10
SW
2889 struct ceph_inode_info *ci, *nci = NULL;
2890 struct inode *inode, *ninode = NULL;
2891 struct list_head *p, *n;
afcdaea3
SW
2892
2893 dout("flush_dirty_caps\n");
2894 spin_lock(&mdsc->cap_dirty_lock);
e9964c10
SW
2895 list_for_each_safe(p, n, &mdsc->cap_dirty) {
2896 if (nci) {
2897 ci = nci;
2898 inode = ninode;
2899 ci->i_ceph_flags &= ~CEPH_I_NOFLUSH;
2900 dout("flush_dirty_caps inode %p (was next inode)\n",
2901 inode);
2902 } else {
2903 ci = list_entry(p, struct ceph_inode_info,
2904 i_dirty_item);
2905 inode = igrab(&ci->vfs_inode);
2906 BUG_ON(!inode);
2907 dout("flush_dirty_caps inode %p\n", inode);
2908 }
2909 if (n != &mdsc->cap_dirty) {
2910 nci = list_entry(n, struct ceph_inode_info,
2911 i_dirty_item);
2912 ninode = igrab(&nci->vfs_inode);
2913 BUG_ON(!ninode);
2914 nci->i_ceph_flags |= CEPH_I_NOFLUSH;
2915 dout("flush_dirty_caps next inode %p, noflush\n",
2916 ninode);
2917 } else {
2918 nci = NULL;
2919 ninode = NULL;
2920 }
afcdaea3
SW
2921 spin_unlock(&mdsc->cap_dirty_lock);
2922 if (inode) {
2923 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
2924 NULL);
2925 iput(inode);
2926 }
2927 spin_lock(&mdsc->cap_dirty_lock);
2928 }
2929 spin_unlock(&mdsc->cap_dirty_lock);
2930}
2931
a8599bd8
SW
2932/*
2933 * Drop open file reference. If we were the last open file,
2934 * we may need to release capabilities to the MDS (or schedule
2935 * their delayed release).
2936 */
2937void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
2938{
2939 struct inode *inode = &ci->vfs_inode;
2940 int last = 0;
2941
2942 spin_lock(&inode->i_lock);
2943 dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
2944 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
2945 BUG_ON(ci->i_nr_by_mode[fmode] == 0);
2946 if (--ci->i_nr_by_mode[fmode] == 0)
2947 last++;
2948 spin_unlock(&inode->i_lock);
2949
2950 if (last && ci->i_vino.snap == CEPH_NOSNAP)
2951 ceph_check_caps(ci, 0, NULL);
2952}
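/*
 * Illustrative sketch, not part of caps.c: how open/release are assumed
 * to pair fmode references.  ceph_get_fmode() takes the matching
 * reference at open time; ceph_put_fmode() above drops it and may
 * trigger a cap check.  ceph_flags_to_mode() and the example_* names
 * are assumptions for illustration.
 */
static int example_open(struct inode *inode, struct file *file)
{
	int fmode = ceph_flags_to_mode(file->f_flags);  /* RD/WR/RDWR/PIN */

	ceph_get_fmode(ceph_inode(inode), fmode);
	return 0;
}

static int example_release(struct inode *inode, struct file *file)
{
	int fmode = ceph_flags_to_mode(file->f_flags);

	ceph_put_fmode(ceph_inode(inode), fmode);  /* may check caps */
	return 0;
}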
2953
2954/*
2955 * Helpers for embedding cap and dentry lease releases into mds
2956 * requests.
2957 *
2958 * @force is used by dentry_release (below) to force inclusion of a
2959 * record for the directory inode, even when there aren't any caps to
2960 * drop.
2961 */
2962int ceph_encode_inode_release(void **p, struct inode *inode,
2963 int mds, int drop, int unless, int force)
2964{
2965 struct ceph_inode_info *ci = ceph_inode(inode);
2966 struct ceph_cap *cap;
2967 struct ceph_mds_request_release *rel = *p;
ec97f88b 2968 int used, dirty;
a8599bd8 2969 int ret = 0;
a8599bd8
SW
2970
2971 spin_lock(&inode->i_lock);
916623da 2972 used = __ceph_caps_used(ci);
ec97f88b 2973 dirty = __ceph_caps_dirty(ci);
916623da 2974
ec97f88b
SW
2975 dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
2976 inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
916623da
SW
2977 ceph_cap_string(unless));
2978
ec97f88b
SW
2979 /* only drop unused, clean caps */
2980 drop &= ~(used | dirty);
916623da 2981
a8599bd8
SW
2982 cap = __get_cap_for_mds(ci, mds);
2983 if (cap && __cap_is_valid(cap)) {
2984 if (force ||
2985 ((cap->issued & drop) &&
2986 (cap->issued & unless) == 0)) {
2987 if ((cap->issued & drop) &&
2988 (cap->issued & unless) == 0) {
2989 dout("encode_inode_release %p cap %p %s -> "
2990 "%s\n", inode, cap,
2991 ceph_cap_string(cap->issued),
2992 ceph_cap_string(cap->issued & ~drop));
2993 cap->issued &= ~drop;
2994 cap->implemented &= ~drop;
2995 if (ci->i_ceph_flags & CEPH_I_NODELAY) {
2996 int wanted = __ceph_caps_wanted(ci);
2997 dout(" wanted %s -> %s (act %s)\n",
2998 ceph_cap_string(cap->mds_wanted),
2999 ceph_cap_string(cap->mds_wanted &
3000 ~wanted),
3001 ceph_cap_string(wanted));
3002 cap->mds_wanted &= wanted;
3003 }
3004 } else {
3005 dout("encode_inode_release %p cap %p %s"
3006 " (force)\n", inode, cap,
3007 ceph_cap_string(cap->issued));
3008 }
3009
3010 rel->ino = cpu_to_le64(ceph_ino(inode));
3011 rel->cap_id = cpu_to_le64(cap->cap_id);
3012 rel->seq = cpu_to_le32(cap->seq);
3013 rel->issue_seq = cpu_to_le32(cap->issue_seq),
3014 rel->mseq = cpu_to_le32(cap->mseq);
3015 rel->caps = cpu_to_le32(cap->issued);
3016 rel->wanted = cpu_to_le32(cap->mds_wanted);
3017 rel->dname_len = 0;
3018 rel->dname_seq = 0;
3019 *p += sizeof(*rel);
3020 ret = 1;
3021 } else {
3022 dout("encode_inode_release %p cap %p %s\n",
3023 inode, cap, ceph_cap_string(cap->issued));
3024 }
3025 }
3026 spin_unlock(&inode->i_lock);
3027 return ret;
3028}
3029
3030int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3031 int mds, int drop, int unless)
3032{
3033 struct inode *dir = dentry->d_parent->d_inode;
3034 struct ceph_mds_request_release *rel = *p;
3035 struct ceph_dentry_info *di = ceph_dentry(dentry);
3036 int force = 0;
3037 int ret;
3038
3039 /*
 3040 * force a record for the directory caps if we have a dentry lease.
3041 * this is racy (can't take i_lock and d_lock together), but it
3042 * doesn't have to be perfect; the mds will revoke anything we don't
3043 * release.
3044 */
3045 spin_lock(&dentry->d_lock);
3046 if (di->lease_session && di->lease_session->s_mds == mds)
3047 force = 1;
3048 spin_unlock(&dentry->d_lock);
3049
3050 ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3051
3052 spin_lock(&dentry->d_lock);
3053 if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3054 dout("encode_dentry_release %p mds%d seq %d\n",
3055 dentry, mds, (int)di->lease_seq);
3056 rel->dname_len = cpu_to_le32(dentry->d_name.len);
3057 memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3058 *p += dentry->d_name.len;
3059 rel->dname_seq = cpu_to_le32(di->lease_seq);
1dadcce3 3060 __ceph_mdsc_drop_dentry_lease(dentry);
a8599bd8
SW
3061 }
3062 spin_unlock(&dentry->d_lock);
3063 return ret;
3064}