/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation. Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <scsi/scsi_device.h>

#include "exofs.h"
#define EXOFS_DBGMSG2(M...) do {} while (0)
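
/* A bio allocated with bio_kmalloc() embeds its bio_vec array in the same
 * allocation; cap the number of pages per bio so the whole struct still
 * fits in a single page-sized kmalloc.
 */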
enum { BIO_MAX_PAGES_KMALLOC =
	(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
};
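
/* page_collect gathers a run of contiguous pages belonging to one inode
 * into a single bio, so that a whole segment can be submitted to the OSD
 * as one I/O.
 */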
struct page_collect {
	struct exofs_sb_info *sbi;
	struct request_queue *req_q;
	struct inode *inode;
	unsigned expected_pages;
	struct exofs_io_state *ios;

	struct bio *bio;
	unsigned nr_pages;
	unsigned long length;
	loff_t pg_first; /* keep 64bit also in 32-arches */
};
static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
		       struct inode *inode)
{
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

	pcol->sbi = sbi;
	pcol->req_q = osd_request_queue(sbi->s_dev);
	pcol->inode = inode;
	pcol->expected_pages = expected_pages;

	pcol->ios = NULL;
	pcol->bio = NULL;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
}
static void _pcol_reset(struct page_collect *pcol)
{
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->bio = NULL;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->ios = NULL;

	/* This is probably the end of the loop, but for writes
	 * it might not end here. Don't be left with nothing.
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages = BIO_MAX_PAGES_KMALLOC;
}
static int pcol_try_alloc(struct page_collect *pcol)
{
	int pages = min_t(unsigned, pcol->expected_pages,
			  BIO_MAX_PAGES_KMALLOC);

	if (!pcol->ios) { /* First time allocate io_state */
		int ret = exofs_get_io_state(pcol->sbi, &pcol->ios);

		if (ret)
			return ret;
	}
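	/* Try progressively smaller allocations; under memory pressure a
	 * large bio_kmalloc() can fail where a smaller bio would still
	 * succeed.
	 */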
	for (; pages; pages >>= 1) {
		pcol->bio = bio_kmalloc(GFP_KERNEL, pages);
		if (likely(pcol->bio))
			return 0;
	}

	EXOFS_ERR("Failed to bio_kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}
static void pcol_free(struct page_collect *pcol)
{
	exofs_put_io_state(pcol->ios);
	pcol->ios = NULL;
}
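
/* Note: bio_add_pc_page() may accept less than @len once the request
 * queue's limits are reached; a partial add is treated as failure so the
 * caller splits the request.
 */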
static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	int added_len = bio_add_pc_page(pcol->req_q, pcol->bio, page, len, 0);

	if (unlikely(len != added_len))
		return -ENOMEM;

	++pcol->nr_pages;
	pcol->length += len;
	return 0;
}
static int update_read_page(struct page *page, int ret)
{
	if (ret == 0) {
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
	} else if (ret == -EFAULT) {
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes. This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked).
		 */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0; /* recovered error */
		EXOFS_DBGMSG("recovered read error\n");
	} else /* Error */
		SetPageError(page);

	return ret;
}
static void update_write_page(struct page *page, int ret)
{
	if (ret) {
		mapping_set_error(page->mapping, ret);
		SetPageError(page);
	}
	end_page_writeback(page);
}
/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol, bool do_unlock)
{
	struct bio_vec *bvec;
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = exofs_check_io(pcol->ios, &resid);

	if (likely(!ret))
		good_bytes = pcol->length;
	else
		good_bytes = pcol->length - resid;

	EXOFS_DBGMSG("readpages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);
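	/* Walk every segment of the bio: pages that start before good_bytes
	 * are treated as good, the rest inherit the error status.
	 */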
	__bio_for_each_segment(bvec, pcol->bio, i, 0) {
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
			      inode->i_ino, page->index,
			      page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		if (do_unlock)
			unlock_page(page);
		length += bvec->bv_len;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG("readpages_done END\n");
	return ret;
}
/* callback of async reads */
static void readpages_done(struct exofs_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	__readpages_done(pcol, true);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}
static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	struct bio_vec *bvec;
	int i;

	__bio_for_each_segment(bvec, pcol->bio, i, 0) {
		struct page *page = bvec->bv_page;

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
	pcol_free(pcol);
}
static int read_exec(struct page_collect *pcol, bool is_sync)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct exofs_io_state *ios = pcol->ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->bio)
		return 0;

	/* see comment in _readpage() about sync reads */
	WARN_ON(is_sync && (pcol->nr_pages != 1));

	ios->bio = pcol->bio;
	ios->length = pcol->length;
	ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;

	if (is_sync) {
		exofs_oi_read(oi, pcol->ios);
		return __readpages_done(pcol, false);
	}
	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;
	ret = exofs_oi_read(oi, ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);

	EXOFS_DBGMSG("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
		     ios->obj.id, _LLU(ios->offset), pcol->length);

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);
	return 0;

err:
	if (!is_sync)
		_unlock_pcol_pages(pcol, ret, READ);
	kfree(pcol_copy);
	return ret;
}
/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous
 * segment and start a new collection. Eventually the caller must submit the
 * last segment, if present.
 */
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;
	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * The current page is cleared and the request is split.
		 */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
			     " splitting\n", inode->i_ino, page->index);

		return read_exec(pcol, false);
	}
try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
			    page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol, false);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->bio) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}
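	/* Zero the tail of a partial last page so stale data past EOF is
	 * never exposed to the reader.
	 */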
	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);
	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			      "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			      page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol, false);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}
static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol, false);
}
static int _readpage(struct page *page, bool is_sync)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	/* readpage_strip might call read_exec(..., is_sync==false) at several
	 * places but not if we have a single page.
	 */
	ret = readpage_strip(&pcol, page);
	if (ret) {
		EXOFS_ERR("_readpage => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol, is_sync);
}
/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}
/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct exofs_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	struct bio_vec *bvec;
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = exofs_check_io(ios, &resid);

	atomic_dec(&pcol->sbi->s_curr_pending);

	if (likely(!ret))
		good_bytes = pcol->length;
	else
		good_bytes = pcol->length - resid;

	EXOFS_DBGMSG("writepages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);
	__bio_for_each_segment(bvec, pcol->bio, i, 0) {
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
			      inode->i_ino, page->index, page_stat);

		length += bvec->bv_len;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG("writepages_done END\n");
}
static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct exofs_io_state *ios = pcol->ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->bio)
		return 0;

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
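	/* bios are READ by default; mark this one as a write */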
	pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */

	ios->bio = pcol_copy->bio;
	ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT;
	ios->length = pcol_copy->length;
	ios->done = writepages_done;
	ios->private = pcol_copy;

	ret = exofs_oi_write(oi, ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: exofs_oi_write() failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	EXOFS_DBGMSG("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
		     pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset),
		     pcol->length);
	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);
	return 0;

err:
	_unlock_pcol_pages(pcol, ret, WRITE);
	kfree(pcol_copy);
	return ret;
}
/* writepage_strip is called either directly from writepage() or by the VFS
 * from within write_cache_pages(), to add one more page to be written to
 * storage. It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and start a new collection.
 * Eventually the caller must submit the last segment, if present.
 */
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else
		len = i_size & ~PAGE_CACHE_MASK;
	if (page->index > end_index || !len) {
		/* in this case, the page is outside the limits
		 * (truncate in progress)
		 */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;
		if (PageError(page))
			ClearPageError(page);
		unlock_page(page);
		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
			     "outside the limits\n",
			     inode->i_ino, page->index);
		return 0;
	}
try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
			    page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->bio) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}
	EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG("Failed pcol_add_page "
			     "nr_pages=%u total_length=0x%lx\n",
			     pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d", ret);
			goto fail;
		}

		goto try_again;
	}
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}
static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
		start + mapping->nrpages :
		wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;
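	/* Start with a reasonable minimum; writeback presumably pays off
	 * best when at least a few dozen pages are collected per submission.
	 */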
	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		     mapping->host->i_ino, wbc->range_start, wbc->range_end,
		     mapping->nrpages, start, end, expected_pages);
	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}
static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	ret = writepage_strip(page, NULL, &pcol);
	if (ret) {
		EXOFS_ERR("exofs_writepage => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}
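
/* For a partial-page write, the rest of the page must be valid before the
 * new bytes land, so a not-yet-uptodate page is first read in synchronously
 * (read-modify-write).
 */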
int exofs_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned len, unsigned flags,
		      struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			return ret;
		}

		page = *pagep;
	}

	/* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		ret = _readpage(page, true);
		if (ret) {
			/* SetPageError was done by _readpage. Is it ok? */
			unlock_page(page);
			EXOFS_DBGMSG("_readpage failed\n");
		}
	}

	return ret;
}
static int exofs_write_begin_export(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned flags,
				    struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}
const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepage	= exofs_writepage,
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= simple_write_end,
};
/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/
/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}
/*
 * get_block_t - Fill in a buffer_head
 * An OSD takes care of block allocation so we just fake an allocation by
 * putting in the inode's sector_t in the buffer_head.
 * TODO: What about the case of create==0 and @iblock does not exist in the
 * object?
 */
static int exofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}
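
/* The OSD's 8-byte logical-length attribute; fetched alongside the inode
 * data so the stored i_size can be cross-checked against the actual object
 * length (see exofs_get_inode() and exofs_iget()).
 */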
const struct osd_attr g_attr_logical_length = ATTR_DEF(
	OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
static int _do_truncate(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t isize = i_size_read(inode);
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	nobh_truncate_page(inode->i_mapping, isize, exofs_get_block);

	ret = exofs_oi_truncate(oi, (u64)isize);
	EXOFS_DBGMSG("(0x%lx) size=0x%llx\n", inode->i_ino, isize);
	return ret;
}
/*
 * Truncate a file to the specified size - all we have to do is set the size
 * attribute. We make sure the object exists first.
 */
void exofs_truncate(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	int ret;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
	      || S_ISLNK(inode->i_mode)))
		return;
	if (exofs_inode_is_fast_symlink(inode))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	/* if we are about to truncate an object, and it hasn't been
	 * created yet, wait
	 */
	if (unlikely(wait_obj_created(oi)))
		goto fail;

	ret = _do_truncate(inode);
	if (ret)
		goto fail;

out:
	mark_inode_dirty(inode);
	return;
fail:
	make_bad_inode(inode);
	goto out;
}
/*
 * Set inode attributes - just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	error = inode_setattr(inode, iattr);
	return error;
}
/*
 * Read an inode from the OSD, and return it as is. We also return the size
 * attribute in the 'obj_size' argument.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
			   struct exofs_fcb *inode, uint64_t *obj_size)
{
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct osd_attr attrs[2];
	struct exofs_io_state *ios;
	int ret;

	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
		return ret;
	}

	ios->obj.id = exofs_oi_objno(oi);
	exofs_make_credential(oi->i_cred, &ios->obj);
	ios->cred = oi->i_cred;

	attrs[0] = g_attr_inode_data;
	attrs[1] = g_attr_logical_length;
	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);
	ret = exofs_sbi_read(ios);
	if (ret)
		goto out;

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}

	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of logical_length failed\n",
			  __func__);
		goto out;
	}
	*obj_size = get_unaligned_be64(attrs[1].val_ptr);

out:
	exofs_put_io_state(ios);
	return ret;
}
static void __oi_init(struct exofs_i_info *oi)
{
	init_waitqueue_head(&oi->i_wq);
}
/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	uint64_t obj_size;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb, &obj_size);
	if (ret)
		goto bad_inode;

	set_obj_created(oi);
	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	inode->i_uid = le32_to_cpu(fcb.i_uid);
	inode->i_gid = le32_to_cpu(fcb.i_gid);
	inode->i_nlink = le16_to_cpu(fcb.i_links_count);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);
	if ((inode->i_size != obj_size) &&
	    (!exofs_inode_is_fast_symlink(inode))) {
		EXOFS_ERR("WARNING: Size of inode=%llu != object=%llu\n",
			  inode->i_size, _LLU(obj_size));
		/* FIXME: call exofs_inode_recovery() */
	}

	oi->i_dir_start_lookup = 0;
	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode))
			inode->i_op = &exofs_fast_symlink_inode_operations;
		else {
			inode->i_op = &exofs_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
				old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
				new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
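
/* Wait, if necessary, for the asynchronous object creation started in
 * exofs_new_inode() to complete; create_done() sets obj_created and wakes
 * i_wq.
 */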
int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
	}

	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}
/*
 * Callback function from exofs_new_inode(). The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct exofs_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = exofs_check_io(ios, NULL);
	exofs_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
			  _LLU(exofs_oi_objno(oi)), _LLU(sbi->s_pid));
		/* TODO: When the FS is corrupted, creation can fail because
		 * the object already exists. Get rid of this asynchronous
		 * creation; if the object exists, increment the obj counter
		 * and try the next object, until we succeed. All these
		 * dangling objects will be made into lost files by
		 * chkfs.exofs.
		 */
		make_bad_inode(inode);
	} else
		set_obj_created(oi);

	atomic_dec(&inode->i_count);
	wake_up(&oi->i_wq);
}
/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct exofs_sb_info *sbi;
	struct exofs_io_state *ios;
	int ret;

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	set_obj_2bcreated(oi);

	sbi = sb->s_fs_info;
	inode->i_uid = current->cred->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		inode->i_gid = current->cred->fsgid;
	}
	inode->i_mode = mode;

	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	mark_inode_dirty(inode);
	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n");
		return ERR_PTR(ret);
	}
	ios->obj.id = exofs_oi_objno(oi);
	exofs_make_credential(oi->i_cred, &ios->obj);
	/* increment the refcount so that the inode will still be around when
	 * we reach the callback
	 */
	atomic_inc(&inode->i_count);
	ios->done = create_done;
	ios->private = inode;
	ios->cred = oi->i_cred;
	ret = exofs_sbi_create(ios);
	if (ret) {
		atomic_dec(&inode->i_count);
		exofs_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}
/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;
	struct exofs_fcb	fcb;
};
/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct exofs_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	exofs_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}
/*
 * Write the inode to the OSD. Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct exofs_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	fcb = &args->fcb;
	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(inode->i_uid);
	fcb->i_gid = cpu_to_le32(inode->i_gid);
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			fcb->i_data[1] = 0;
		} else {
			fcb->i_data[0] = 0;
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			fcb->i_data[2] = 0;
		}
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));
	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;
	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}
	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = exofs_oi_write(oi, ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	exofs_put_io_state(ios);
free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("ret=>%d\n", ret);
	return ret;
}
int exofs_write_inode(struct inode *inode, int wait)
{
	return exofs_update_inode(inode, wait);
}
/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up
 * to do.
 */
static void delete_done(struct exofs_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	exofs_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}
/*
 * Called when the refcount of an inode reaches zero. We remove the object
 * from the OSD here. We make sure the object was created before we try to
 * remove it.
 */
void exofs_delete_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct exofs_io_state *ios;
	int ret;
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	mark_inode_dirty(inode);
	exofs_update_inode(inode, inode_needs_sync(inode));

	inode->i_size = 0;
	if (inode->i_blocks)
		exofs_truncate(inode);

	clear_inode(inode);
	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
		return;
	}
	/* if we are deleting an obj that hasn't been created yet, wait */
	if (!obj_created(oi)) {
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
	}
	ios->obj.id = exofs_oi_objno(oi);
	ios->done = delete_done;
	ios->private = sbi;
	ios->cred = oi->i_cred;
	ret = exofs_sbi_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: exofs_sbi_remove failed\n", __func__);
		exofs_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	clear_inode(inode);
}