ceph: fix copy_user_to_page_vector()

fs/ceph/file.c
#include "ceph_debug.h"

#include <linux/sched.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
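
/*
 * How an I/O lands in one of these modes (see ceph_aio_read() and
 * ceph_aio_write() below): the generic buffered helpers are used only
 * while we hold CEPH_CAP_FILE_CACHE (reads) or CEPH_CAP_FILE_BUFFER
 * (writes) and neither O_DIRECT nor MS_SYNCHRONOUS is in effect;
 * otherwise we fall through to ceph_sync_read()/ceph_sync_write(),
 * which take the user-page variant when O_DIRECT is set.
 */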


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_client *client = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
	req->r_args.open.preferred = cpu_to_le32(-1);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
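
/*
 * Note on the fmode reference: every caller of ceph_init_file() has
 * already taken an fmode reference on the ceph_inode, either via
 * __ceph_get_fmode() or via a successful MDS open (r_fmode).  The
 * paths above that never install ->private_data (symlink, special
 * files, allocation failure) must drop that reference here, since
 * ceph_release() will never run for those files.
 */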

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&inode->i_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have any caps.  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&inode->i_lock);
	if (__ceph_is_any_real_caps(ci)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&inode->i_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
				struct nameidata *nd, int mode,
				int locked_dir)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct file *file = nd->intent.open.file;
	struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
	struct ceph_mds_request *req;
	int err;
	int flags = nd->intent.open.flags - 1;  /* silly vfs! */

	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return ERR_PTR(PTR_ERR(req));
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;  /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (!err)
		err = ceph_init_file(req->r_dentry->d_inode, file,
				     req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", dentry);
	return dentry;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);
	return 0;
}

/*
 * build a vector of user pages
 */
static struct page **get_direct_page_vector(const char __user *data,
					    int num_pages,
					    loff_t off, size_t len)
{
	struct page **pages;
	int rc;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	down_read(&current->mm->mmap_sem);
	rc = get_user_pages(current, current->mm, (unsigned long)data,
			    num_pages, 0, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		goto fail;
	return pages;

fail:
	kfree(pages);
	return ERR_PTR(rc);
}
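
/*
 * Note that get_user_pages() takes a reference on each page it pins,
 * so a vector built here must be released with put_page_vector()
 * below (which drops those references), never with
 * ceph_release_page_vector(), which frees the pages outright.
 */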

static void put_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(pages);
}

void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}

/*
 * allocate a vector of new pages
 */
static struct page **alloc_page_vector(int num_pages)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}

/*
 * copy user data into a page vector
 */
static int copy_user_to_page_vector(struct page **pages,
				    const char __user *data,
				    loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
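
/*
 * Worked example (assuming PAGE_CACHE_SIZE == 4096): off = 4094 and
 * len = 6 starts with po = 4094.  The first pass copies
 * l = min(4096 - 4094, 6) = 2 bytes into pages[0]; po reaches 4096,
 * wraps to 0, and i advances to 1.  The second pass copies the
 * remaining 4 bytes to the start of pages[1] and returns len.  Only
 * bytes that copy_from_user() actually transferred advance
 * data/left/po, so a partial fault is retried rather than skipped.
 */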

/*
 * copy user data from a page vector into a user pointer
 */
static int copy_page_vector_to_user(struct page **pages, char __user *data,
				    loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, left, PAGE_CACHE_SIZE-po);
		bad = copy_to_user(data, page_address(pages[i]) + po, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		if (po) {
			po += l - bad;
			if (po == PAGE_CACHE_SIZE)
				po = 0;
		}
		i++;
	}
	return len;
}

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
static void zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	dout("zero_page_vector_page %u~%u\n", off, len);
	BUG_ON(len < PAGE_CACHE_SIZE);

	/* leading partial page? */
	if (off & ~PAGE_CACHE_MASK) {
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)(off & ~PAGE_CACHE_MASK));
		zero_user_segment(pages[i], off & ~PAGE_CACHE_MASK,
				  PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE - (off & ~PAGE_CACHE_MASK);
		off += PAGE_CACHE_SIZE;
		off &= PAGE_CACHE_MASK;
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p\n", i, pages[i]);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		off += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
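
/*
 * Worked example (assuming PAGE_CACHE_SIZE == 4096): off = 512 and
 * len = 8192.  The head branch zeroes bytes 512..4095 of pages[0]
 * and drops len by the 3584 bytes just zeroed; the loop then zeroes
 * all of pages[1] (len falls to 512); the tail branch zeroes the
 * first 512 bytes of pages[2].
 */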


/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages)
{
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;

more:
	this_len = left;
	ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left);
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	if (ret == -ENOENT)
		ret = 0;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages =
			((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			zero_page_vector_range(page_off + read,
					       pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

	if (was_short) {
		/* was original extent fully inside i_size? */
		if (pos + left <= inode->i_size) {
			dout("zero tail\n");
			zero_page_vector_range(page_off + read, len - read,
					       pages);
			goto out;
		}

		/* check i_size */
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0)
			goto out;

		/* hit EOF? */
		if (pos >= inode->i_size)
			goto out;

		goto more;
	}

out:
	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}
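
/*
 * Example (assuming the default 4 MB object size): a 6 MB read at
 * offset 0 goes out as a single call, but ceph_osdc_readpages()
 * trims this_len to the object boundary (hit_stripe), so the loop
 * issues a second read for the remaining 2 MB from the next object.
 * A short reply (was_short) is zero-filled only after checking
 * against i_size that the missing bytes lie inside the file; if the
 * short read was simply EOF, the short count is returned as-is.
 */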

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages = calc_pages_for(off, len);
	int ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		pages = get_direct_page_vector(data, num_pages, off, len);

		/*
		 * flush any page cache pages in this range.  this
		 * will make concurrent normal and O_DIRECT io slow,
		 * but it will at least behave sensibly when they are
		 * in sequence.
		 */
		filemap_write_and_wait(inode->i_mapping);
	} else {
		pages = alloc_page_vector(num_pages);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = striped_read(inode, off, len, pages, num_pages);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

	if (file->f_flags & O_DIRECT)
		put_page_vector(pages, num_pages);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}
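
/*
 * calc_pages_for() sizes the vector to cover every page touched by
 * the byte range [off, off+len).  For example, with 4096-byte pages,
 * off = 1536 and len = 8192 span bytes 1536..9727, i.e. pages 0..2,
 * so num_pages is 3 even though len alone fits in two pages.
 */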

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}
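
/*
 * sync_write_commit() above is the second half of the unsafe-write
 * bookkeeping in ceph_sync_write() below: a write that requests both
 * ACK and ONDISK (the non-O_SYNC, non-O_DIRECT path) returns to the
 * caller on the ACK but stays on ci->i_unsafe_writes with a
 * CEPH_CAP_FILE_WR reference held, so the WR cap cannot be released
 * to the MDS while the write is acknowledged but not yet committed.
 */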

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t *offset)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	unsigned long long pos;
	u64 len;
	int written = 0;
	int flags;
	int do_sync = 0;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;

	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_APPEND)
		pos = i_size_read(inode);
	else
		pos = *offset;

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		do_sync = 1;

	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary.  this isn't atomic, unfortunately.  :(
	 */
more:
	len = left;
	req = ceph_osdc_new_request(&client->osdc, &ci->i_layout,
				    ceph_vino(inode), pos, &len,
				    CEPH_OSD_OP_WRITE, flags,
				    ci->i_snap_realm->cached_context,
				    do_sync,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    &mtime, false, 2);
	if (IS_ERR(req))
		return PTR_ERR(req);

	num_pages = calc_pages_for(pos, len);

	if (file->f_flags & O_DIRECT) {
		pages = get_direct_page_vector(data, num_pages, pos, len);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos, pos+len);
	} else {
		pages = alloc_page_vector(num_pages);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/* get a second commit callback */
			req->r_safe_callback = sync_write_commit;
			req->r_own_pages = 1;
		}
	}
	req->r_pages = pages;
	req->r_num_pages = num_pages;
	req->r_inode = inode;

	ret = ceph_osdc_start_request(&client->osdc, req, false);
	if (!ret) {
		if (req->r_safe_callback) {
			/*
			 * Add to inode unsafe list only after we
			 * start_request so that a tid has been assigned.
			 */
			spin_lock(&ci->i_unsafe_lock);
			list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		}
		ret = ceph_osdc_wait_request(&client->osdc, req);
	}

	if (file->f_flags & O_DIRECT)
		put_page_vector(pages, num_pages);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		if (left)
			goto more;

		ret = written;
		*offset = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	ssize_t ret;
	int got = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
	__ceph_do_pending_vmtruncate(inode);
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
			    &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & CEPH_CAP_FILE_CACHE) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, iov->iov_base, len, ppos);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);
	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc;
	loff_t endoff = pos + iov->iov_len;
	int got = 0;
	int ret;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    &got, endoff);
	if (ret < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & CEPH_CAP_FILE_BUFFER) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
				      &iocb->ki_pos);
	} else {
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL)))
			ret = vfs_fsync_range(file, file->f_path.dentry,
					      pos, pos + ret - 1, 1);
	}
	if (ret >= 0) {
		spin_lock(&inode->i_lock);
		__ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&inode->i_lock);
	}

out:
	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	switch (origin) {
	case SEEK_END:
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it.
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	}

	if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
};