#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

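/*
 * In practice (see ceph_aio_read/ceph_aio_write below), we fall back
 * from buffered I/O to the synchronous path when O_DIRECT is set,
 * when the mount is MS_SYNCHRONOUS, or when we do not hold the
 * FILE_CACHE/FILE_BUFFER capabilities that make client-side caching
 * safe.
 */
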
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
	req->r_args.open.preferred = cpu_to_le32(-1);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
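		/*
		 * readdir offsets 0 and 1 are reserved for the "." and
		 * ".." entries, so the first real entry starts at 2.
		 */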
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&inode->i_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have any caps.  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&inode->i_lock);
	if (__ceph_is_any_real_caps(ci)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);

		/*
		 * Adjust the wanted set?  Only poke the MDS if the
		 * caps we want are neither already issued to us nor
		 * already in the wanted set the MDS is tracking for
		 * us; in either of those cases this open is covered.
		 */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&inode->i_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
				struct nameidata *nd, int mode,
				int locked_dir)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct file *file = nd->intent.open.file;
	struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
	struct ceph_mds_request *req;
	int err;
	int flags = nd->intent.open.flags - 1;  /* silly vfs! */

	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;  /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (!err)
		err = ceph_init_file(req->r_dentry->d_inode, file,
				     req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", dentry);
	return dentry;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * Read a range of bytes striped over one or more objects.  Iterate
 * over the objects we stripe over.  (That's not atomic, but good
 * enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we
 * should only return a short read to the caller if we hit EOF.
 */
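/*
 * For example (illustrative numbers, assuming a default layout of
 * 4 MB objects and no further striping): a 6 MB read starting at
 * offset 3 MB touches three objects.  The first pass below trims
 * this_len from 6 MB down to the 1 MB left in the first object, so
 * hit_stripe is set and we loop; the following passes read 4 MB from
 * the second object and 1 MB from the third.
 */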
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int page_off = off & ~PAGE_CACHE_MASK;  /* first byte's offset in page */
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;

more:
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left);
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	if (ret == -ENOENT)
		ret = 0;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages =
			((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			ceph_zero_page_vector_range(page_off + read,
						    pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

	if (was_short) {
		/* was original extent fully inside i_size? */
		if (pos + left <= inode->i_size) {
			dout("zero tail\n");
			ceph_zero_page_vector_range(page_off + read, len - read,
						    pages);
			read = len;
			goto out;
		}

		/* check i_size */
		*checkeof = 1;
	}

out:
	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
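/*
 * Worked example for calc_pages_for(), which counts the pages spanned
 * by [off, off+len): with 4 KB pages, off=4000 and len=5000 cover
 * bytes 4000..8999, which touch pages 0, 1, and 2, so num_pages is 3
 * even though the read is only a little over one page long.
 */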
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages = calc_pages_for(off, len);
	int ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		pages = ceph_get_direct_page_vector(data, num_pages, off, len);

		/*
		 * flush any page cache pages in this range.  this
		 * will make concurrent normal and O_DIRECT io slow,
		 * but it will at least behave sensibly when they are
		 * in sequence.
		 */
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

done:
	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}
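
/*
 * Note on the write path below: a non-O_SYNC, non-O_DIRECT sync write
 * requests both an ACK and an ONDISK commit from the OSD.  We can
 * return to the caller once the OSD acks, but the request stays on
 * i_unsafe_writes with a CEPH_CAP_FILE_WR reference held until the
 * final commit arrives and sync_write_commit() above runs.
 */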

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If the write spans an object boundary, just do multiple writes.
 * (For a correct atomic write, we should e.g. take write locks on all
 * objects, roll back on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t *offset)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	long long unsigned pos;
	u64 len;
	int written = 0;
	int flags;
	int do_sync = 0;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;

	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_APPEND)
		pos = i_size_read(inode);
	else
		pos = *offset;

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + left) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		do_sync = 1;

	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary.  this isn't atomic, unfortunately.  :(
	 */
more:
	len = left;
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), pos, &len,
				    CEPH_OSD_OP_WRITE, flags,
				    ci->i_snap_realm->cached_context,
				    do_sync,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    &mtime, false, 2);
	if (!req)
		return -ENOMEM;

	num_pages = calc_pages_for(pos, len);

	if (file->f_flags & O_DIRECT) {
		pages = ceph_get_direct_page_vector(data, num_pages, pos, len);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/* get a second commit callback */
			req->r_safe_callback = sync_write_commit;
			req->r_own_pages = 1;
		}
	}
	req->r_pages = pages;
	req->r_num_pages = num_pages;
	req->r_inode = inode;

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		if (req->r_safe_callback) {
			/*
			 * Add to inode unsafe list only after we
			 * start_request so that a tid has been assigned.
			 */
			spin_lock(&ci->i_unsafe_lock);
			list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		}
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
	}

	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		if (left)
			goto more;

		ret = written;
		*offset = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
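/*
 * A short result from the sync read path may mean EOF, or just a hole
 * (an object that doesn't exist yet).  When striped_read() flags
 * checkeof, we re-fetch the file size from the MDS; if the file in
 * fact extends past where the read stopped, we loop ("again") and
 * keep reading.
 */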
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	void __user *base = iov->iov_base;
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	__ceph_do_pending_vmtruncate(inode);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("aio_read sync_read hit hole, reading more\n");
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to the MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write... _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
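/*
 * "Near ENOSPC" maps to the CEPH_OSDMAP_NEARFULL flag below: buffered
 * writes still go through the page cache, but we fsync the written
 * range before returning.  If the osdmap has CEPH_OSDMAP_FULL set, we
 * fail the write outright with -ENOSPC.
 */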
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	loff_t endoff = pos + iov->iov_len;
	int want, got = 0;
	int ret, err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
				      &iocb->ki_pos);
	} else {
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
			err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
			if (err < 0)
				ret = err;
		}
	}
	if (ret >= 0) {
		spin_lock(&inode->i_lock);
		__ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&inode->i_lock);
	}

out:
	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
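/*
 * The locally cached i_size is only trustworthy if we hold the right
 * caps, hence the getattr for CEPH_STAT_CAP_SIZE before using it.
 */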
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	switch (origin) {
	case SEEK_END:
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it.
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	}

	if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
};