diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index cf795625610e67b3dea6eff8ee12ee708e89eb76..db773428f95f28e03a9631d316f2eec711b2aa41 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -28,6 +28,7 @@
  */
 
 #include <linux/nfs_fs.h>
+#include "internal.h"
 #include "pnfs.h"
 
 #define NFSDBG_FACILITY                NFSDBG_PNFS
@@ -74,7 +75,7 @@ void
 unset_pnfs_layoutdriver(struct nfs_server *nfss)
 {
        if (nfss->pnfs_curr_ld) {
-               nfss->pnfs_curr_ld->uninitialize_mountpoint(nfss);
+               nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
@@ -114,7 +115,7 @@ set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
                goto out_no_driver;
        }
        server->pnfs_curr_ld = ld_type;
-       if (ld_type->initialize_mountpoint(server)) {
+       if (ld_type->set_layoutdriver(server)) {
                printk(KERN_ERR
                       "%s: Error initializing mount point for layout driver %u.\n",
                       __func__, id);
@@ -139,6 +140,11 @@ pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
                printk(KERN_ERR "%s id 0 is reserved\n", __func__);
                return status;
        }
+       if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
+               printk(KERN_ERR "%s Layout driver must provide "
+                      "alloc_lseg and free_lseg.\n", __func__);
+               return status;
+       }
 
        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
@@ -166,3 +172,612 @@ pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
        spin_unlock(&pnfs_spinlock);
 }
 EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
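
With alloc_lseg and free_lseg now mandatory, a minimal layout driver
registration looks roughly as follows. This is a hedged sketch assuming
kernel-module context: the example_* names and the trivial bodies are
hypothetical, and the two mount hooks are fleshed out in the
deviceid-cache sketch near the end of this file.

static struct pnfs_layout_segment *
example_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *res)
{
	/* a real driver decodes the opaque layout body carried in res */
	return kzalloc(sizeof(struct pnfs_layout_segment), GFP_KERNEL);
}

static void
example_free_lseg(struct pnfs_layout_segment *lseg)
{
	kfree(lseg);
}

static struct pnfs_layoutdriver_type example_layoutdriver = {
	.id                 = LAYOUT_NFSV4_1_FILES,	/* placeholder id */
	.owner              = THIS_MODULE,
	.set_layoutdriver   = example_set_layoutdriver,	/* defined below */
	.clear_layoutdriver = example_clear_layoutdriver,
	.alloc_lseg         = example_alloc_lseg,
	.free_lseg          = example_free_lseg,
};

static int __init example_init(void)
{
	return pnfs_register_layoutdriver(&example_layoutdriver);
}

static void __exit example_exit(void)
{
	pnfs_unregister_layoutdriver(&example_layoutdriver);
}

module_init(example_init);
module_exit(example_exit);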
+
+/*
+ * pNFS client layout cache
+ */
+
+static void
+get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+{
+       assert_spin_locked(&lo->inode->i_lock);
+       lo->refcount++;
+}
+
+static void
+put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+{
+       assert_spin_locked(&lo->inode->i_lock);
+       BUG_ON(lo->refcount == 0);
+
+       lo->refcount--;
+       if (!lo->refcount) {
+               dprintk("%s: freeing layout cache %p\n", __func__, lo);
+               BUG_ON(!list_empty(&lo->layouts));
+               NFS_I(lo->inode)->layout = NULL;
+               kfree(lo);
+       }
+}
+
+void
+put_layout_hdr(struct inode *inode)
+{
+       spin_lock(&inode->i_lock);
+       put_layout_hdr_locked(NFS_I(inode)->layout);
+       spin_unlock(&inode->i_lock);
+}
+
+static void
+init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
+{
+       INIT_LIST_HEAD(&lseg->fi_list);
+       kref_init(&lseg->kref);
+       lseg->layout = lo;
+}
+
+/* Called without i_lock held, as the free_lseg call may sleep */
+static void
+destroy_lseg(struct kref *kref)
+{
+       struct pnfs_layout_segment *lseg =
+               container_of(kref, struct pnfs_layout_segment, kref);
+       struct inode *ino = lseg->layout->inode;
+
+       dprintk("--> %s\n", __func__);
+       NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+       /* Matched by get_layout_hdr_locked in pnfs_insert_layout */
+       put_layout_hdr(ino);
+}
+
+static void
+put_lseg(struct pnfs_layout_segment *lseg)
+{
+       if (!lseg)
+               return;
+
+       dprintk("%s: lseg %p ref %d\n", __func__, lseg,
+               atomic_read(&lseg->kref.refcount));
+       kref_put(&lseg->kref, destroy_lseg);
+}
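
The lseg lifetime above is the standard kref pattern: whoever drops the
last reference runs the destructor, and it runs outside i_lock because
free_lseg may sleep. A standalone model of that pattern in plain C11
(not kernel code; memory-ordering details are simplified):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

static void obj_release(struct obj *o)
{
	printf("last reference dropped, freeing %p\n", (void *)o);
	free(o);
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the old value: 1 means we held the last ref */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		obj_release(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcount, 1);
	obj_get(o);	/* second reference, e.g. the layout's segment list */
	obj_put(o);	/* list drops its reference */
	obj_put(o);	/* final put triggers obj_release() */
	return 0;
}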
+
+static void
+pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
+{
+       struct pnfs_layout_segment *lseg, *next;
+       struct nfs_client *clp;
+
+       dprintk("%s:Begin lo %p\n", __func__, lo);
+
+       assert_spin_locked(&lo->inode->i_lock);
+       list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) {
+               dprintk("%s: freeing lseg %p\n", __func__, lseg);
+               list_move(&lseg->fi_list, tmp_list);
+       }
+       clp = NFS_SERVER(lo->inode)->nfs_client;
+       spin_lock(&clp->cl_lock);
+       /* List does not take a reference, so no need for put here */
+       list_del_init(&lo->layouts);
+       spin_unlock(&clp->cl_lock);
+       write_seqlock(&lo->seqlock);
+       clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
+       write_sequnlock(&lo->seqlock);
+
+       dprintk("%s:Return\n", __func__);
+}
+
+static void
+pnfs_free_lseg_list(struct list_head *tmp_list)
+{
+       struct pnfs_layout_segment *lseg;
+
+       while (!list_empty(tmp_list)) {
+               lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
+                               fi_list);
+               dprintk("%s calling put_lseg on %p\n", __func__, lseg);
+               list_del(&lseg->fi_list);
+               put_lseg(lseg);
+       }
+}
+
+void
+pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+       struct pnfs_layout_hdr *lo;
+       LIST_HEAD(tmp_list);
+
+       spin_lock(&nfsi->vfs_inode.i_lock);
+       lo = nfsi->layout;
+       if (lo) {
+               pnfs_clear_lseg_list(lo, &tmp_list);
+               /* Matched by refcount set to 1 in alloc_init_layout_hdr */
+               put_layout_hdr_locked(lo);
+       }
+       spin_unlock(&nfsi->vfs_inode.i_lock);
+       pnfs_free_lseg_list(&tmp_list);
+}
+
+/*
+ * Called by the state manager to remove all layouts established under an
+ * expired lease.
+ */
+void
+pnfs_destroy_all_layouts(struct nfs_client *clp)
+{
+       struct pnfs_layout_hdr *lo;
+       LIST_HEAD(tmp_list);
+
+       spin_lock(&clp->cl_lock);
+       list_splice_init(&clp->cl_layouts, &tmp_list);
+       spin_unlock(&clp->cl_lock);
+
+       while (!list_empty(&tmp_list)) {
+               lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
+                               layouts);
+               dprintk("%s freeing layout for inode %lu\n", __func__,
+                       lo->inode->i_ino);
+               pnfs_destroy_layout(NFS_I(lo->inode));
+       }
+}
+
+/* update lo->stateid with new if it is more recent
+ *
+ * lo->stateid could be the open stateid, in which case we just use what is given.
+ */
+static void
+pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
+                       const nfs4_stateid *new)
+{
+       nfs4_stateid *old = &lo->stateid;
+       bool overwrite = false;
+
+       write_seqlock(&lo->seqlock);
+       if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) ||
+           memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other)))
+               overwrite = true;
+       else {
+               u32 oldseq, newseq;
+
+               oldseq = be32_to_cpu(old->stateid.seqid);
+               newseq = be32_to_cpu(new->stateid.seqid);
+               if ((int)(newseq - oldseq) > 0)
+                       overwrite = true;
+       }
+       if (overwrite)
+               memcpy(&old->stateid, &new->stateid, sizeof(new->stateid));
+       write_sequnlock(&lo->seqlock);
+}
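
The (int)(newseq - oldseq) > 0 test is serial-number arithmetic: the
32-bit seqid is treated as a circular counter, so a stateid whose seqid
recently wrapped past zero still compares as newer. A standalone check:

#include <stdint.h>
#include <stdio.h>

static int seqid_newer(uint32_t newseq, uint32_t oldseq)
{
	/* same trick as pnfs_set_layout_stateid(): signed difference */
	return (int32_t)(newseq - oldseq) > 0;
}

int main(void)
{
	printf("%d\n", seqid_newer(5, 3));          /* 1: plainly newer */
	printf("%d\n", seqid_newer(3, 5));          /* 0: older */
	printf("%d\n", seqid_newer(2, 0xfffffffe)); /* 1: newer across wraparound */
	return 0;
}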
+
+static void
+pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo,
+                             struct nfs4_state *state)
+{
+       int seq;
+
+       dprintk("--> %s\n", __func__);
+       write_seqlock(&lo->seqlock);
+       do {
+               seq = read_seqbegin(&state->seqlock);
+               memcpy(lo->stateid.data, state->stateid.data,
+                      sizeof(state->stateid.data));
+       } while (read_seqretry(&state->seqlock, seq));
+       set_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
+       write_sequnlock(&lo->seqlock);
+       dprintk("<-- %s\n", __func__);
+}
+
+void
+pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+                       struct nfs4_state *open_state)
+{
+       int seq;
+
+       dprintk("--> %s\n", __func__);
+       do {
+               seq = read_seqbegin(&lo->seqlock);
+               if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) {
+                       /* This will trigger retry of the read */
+                       pnfs_layout_from_open_stateid(lo, open_state);
+               } else
+                       memcpy(dst->data, lo->stateid.data,
+                              sizeof(lo->stateid.data));
+       } while (read_seqretry(&lo->seqlock, seq));
+       dprintk("<-- %s\n", __func__);
+}
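
Note how the fallback works: pnfs_layout_from_open_stateid() takes the
write side of lo->seqlock, which bumps the sequence count and makes the
enclosing read_seqretry() fail, so the loop retries and re-reads the
stateid it has just copied in. A simplified, single-threaded model of
the seqlock read-retry protocol (the barrier details that the kernel's
seqlock handles are elided):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static int value;

static void writer_update(int v)
{
	atomic_fetch_add(&seq, 1);	/* odd: write in progress */
	value = v;
	atomic_fetch_add(&seq, 1);	/* even again: stable */
}

static int reader_read(void)
{
	unsigned int start;
	int v;

	do {
		start = atomic_load(&seq);
		v = value;
		/* retry if a write was in flight or completed meanwhile */
	} while ((start & 1) || atomic_load(&seq) != start);
	return v;
}

int main(void)
{
	writer_update(42);
	printf("%d\n", reader_read());
	return 0;
}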
+
+/*
+ * Get layout from server.
+ *    for now, assume that whole file layouts are requested.
+ *    arg->offset: 0
+ *    arg->length: all ones
+ */
+static struct pnfs_layout_segment *
+send_layoutget(struct pnfs_layout_hdr *lo,
+          struct nfs_open_context *ctx,
+          u32 iomode)
+{
+       struct inode *ino = lo->inode;
+       struct nfs_server *server = NFS_SERVER(ino);
+       struct nfs4_layoutget *lgp;
+       struct pnfs_layout_segment *lseg = NULL;
+
+       dprintk("--> %s\n", __func__);
+
+       BUG_ON(ctx == NULL);
+       lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
+       if (lgp == NULL) {
+               put_layout_hdr(lo->inode);
+               return NULL;
+       }
+       lgp->args.minlength = NFS4_MAX_UINT64;
+       lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
+       lgp->args.range.iomode = iomode;
+       lgp->args.range.offset = 0;
+       lgp->args.range.length = NFS4_MAX_UINT64;
+       lgp->args.type = server->pnfs_curr_ld->id;
+       lgp->args.inode = ino;
+       lgp->args.ctx = get_nfs_open_context(ctx);
+       lgp->lsegpp = &lseg;
+
+       /* Synchronously retrieve layout information from server and
+        * store in lseg.
+        */
+       nfs4_proc_layoutget(lgp);
+       if (!lseg) {
+               /* remember that LAYOUTGET failed and suspend trying */
+               set_bit(lo_fail_bit(iomode), &lo->state);
+       }
+       return lseg;
+}
+
+/*
+ * Compare two layout segments for sorting into layout cache.
+ * We want to preferentially return RW over RO layouts, so ensure those
+ * are seen first.
+ */
+static s64
+cmp_layout(u32 iomode1, u32 iomode2)
+{
+       /* a positive return sorts iomode1 ahead: RW before READ */
+       return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
+}
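
A positive return sorts iomode1 ahead of iomode2, and pnfs_insert_layout()
below skips list entries while they sort ahead of the new segment, so RW
segments always land before READ segments. A standalone check of the
comparator's signs:

#include <stdio.h>

enum pnfs_iomode { IOMODE_READ = 1, IOMODE_RW = 2, IOMODE_ANY = 3 };

static int cmp_layout(unsigned int iomode1, unsigned int iomode2)
{
	return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
}

int main(void)
{
	printf("%d\n", cmp_layout(IOMODE_RW, IOMODE_READ)); /*  1: RW first  */
	printf("%d\n", cmp_layout(IOMODE_READ, IOMODE_RW)); /* -1: READ last */
	printf("%d\n", cmp_layout(IOMODE_RW, IOMODE_RW));   /*  0: equal     */
	return 0;
}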
+
+static void
+pnfs_insert_layout(struct pnfs_layout_hdr *lo,
+                  struct pnfs_layout_segment *lseg)
+{
+       struct pnfs_layout_segment *lp;
+       int found = 0;
+
+       dprintk("%s:Begin\n", __func__);
+
+       assert_spin_locked(&lo->inode->i_lock);
+       if (list_empty(&lo->segs)) {
+               struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client;
+
+               spin_lock(&clp->cl_lock);
+               BUG_ON(!list_empty(&lo->layouts));
+               list_add_tail(&lo->layouts, &clp->cl_layouts);
+               spin_unlock(&clp->cl_lock);
+       }
+       list_for_each_entry(lp, &lo->segs, fi_list) {
+               if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0)
+                       continue;
+               list_add_tail(&lseg->fi_list, &lp->fi_list);
+               dprintk("%s: inserted lseg %p "
+                       "iomode %d offset %llu length %llu before "
+                       "lp %p iomode %d offset %llu length %llu\n",
+                       __func__, lseg, lseg->range.iomode,
+                       lseg->range.offset, lseg->range.length,
+                       lp, lp->range.iomode, lp->range.offset,
+                       lp->range.length);
+               found = 1;
+               break;
+       }
+       if (!found) {
+               list_add_tail(&lseg->fi_list, &lo->segs);
+               dprintk("%s: inserted lseg %p "
+                       "iomode %d offset %llu length %llu at tail\n",
+                       __func__, lseg, lseg->range.iomode,
+                       lseg->range.offset, lseg->range.length);
+       }
+       get_layout_hdr_locked(lo);
+
+       dprintk("%s:Return\n", __func__);
+}
+
+static struct pnfs_layout_hdr *
+alloc_init_layout_hdr(struct inode *ino)
+{
+       struct pnfs_layout_hdr *lo;
+
+       lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
+       if (!lo)
+               return NULL;
+       lo->refcount = 1;
+       INIT_LIST_HEAD(&lo->layouts);
+       INIT_LIST_HEAD(&lo->segs);
+       seqlock_init(&lo->seqlock);
+       lo->inode = ino;
+       return lo;
+}
+
+static struct pnfs_layout_hdr *
+pnfs_find_alloc_layout(struct inode *ino)
+{
+       struct nfs_inode *nfsi = NFS_I(ino);
+       struct pnfs_layout_hdr *new = NULL;
+
+       dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
+
+       assert_spin_locked(&ino->i_lock);
+       if (nfsi->layout)
+               return nfsi->layout;
+
+       spin_unlock(&ino->i_lock);
+       new = alloc_init_layout_hdr(ino);
+       spin_lock(&ino->i_lock);
+
+       if (likely(nfsi->layout == NULL))       /* Won the race? */
+               nfsi->layout = new;
+       else
+               kfree(new);
+       return nfsi->layout;
+}
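
This is the classic drop-the-lock/allocate/recheck idiom: kzalloc with
GFP_KERNEL may sleep, so it cannot run under i_lock, and the recheck
handles the window in which another thread installed a header first. The
shape of the idiom as a standalone pthread sketch (the kernel version
differs in that it is entered and exited with the lock already held;
compile with -pthread):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct state { int dummy; } *cached;

struct state *find_alloc_state(void)
{
	struct state *new, *ret;

	pthread_mutex_lock(&lock);
	if (cached) {
		ret = cached;
		pthread_mutex_unlock(&lock);
		return ret;
	}
	/* allocation may block: drop the lock around it */
	pthread_mutex_unlock(&lock);
	new = calloc(1, sizeof(*new));
	pthread_mutex_lock(&lock);
	if (cached == NULL)	/* won the race? */
		cached = new;
	else
		free(new);	/* lost: someone else installed one */
	ret = cached;
	pthread_mutex_unlock(&lock);
	return ret;
}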
+
+/*
+ * iomode matching rules:
+ * iomode       lseg    match
+ * ------       ------  -----
+ * ANY          READ    true
+ * ANY          RW      true
+ * RW           READ    false
+ * RW           RW      true
+ * READ         READ    true
+ * READ         RW      true
+ */
+static int
+is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
+{
+       return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW);
+}
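
The predicate encodes the table directly: only a RW request rejects a
READ-only segment. A standalone check over all six rows (enum values as
defined for enum pnfs_iomode; the logic does not depend on them):

#include <stdio.h>

enum pnfs_iomode { IOMODE_READ = 1, IOMODE_RW = 2, IOMODE_ANY = 3 };

static int is_matching(enum pnfs_iomode iomode, enum pnfs_iomode lseg_iomode)
{
	return (iomode != IOMODE_RW || lseg_iomode == IOMODE_RW);
}

int main(void)
{
	enum pnfs_iomode req[] = { IOMODE_ANY, IOMODE_RW, IOMODE_READ };
	enum pnfs_iomode seg[] = { IOMODE_READ, IOMODE_RW };
	int i, j;

	for (i = 0; i < 3; i++)
		for (j = 0; j < 2; j++)
			printf("req=%d lseg=%d match=%d\n",
			       req[i], seg[j], is_matching(req[i], seg[j]));
	return 0;
}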
+
+/*
+ * lookup range in layout
+ */
+static struct pnfs_layout_segment *
+pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
+{
+       struct pnfs_layout_segment *lseg, *ret = NULL;
+
+       dprintk("%s:Begin\n", __func__);
+
+       assert_spin_locked(&lo->inode->i_lock);
+       list_for_each_entry(lseg, &lo->segs, fi_list) {
+               if (is_matching_lseg(lseg, iomode)) {
+                       ret = lseg;
+                       break;
+               }
+               if (cmp_layout(iomode, lseg->range.iomode) > 0)
+                       break;
+       }
+
+       dprintk("%s:Return lseg %p ref %d\n",
+               __func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0);
+       return ret;
+}
+
+/*
+ * Layout segment is retrieved from the server if not cached.
+ * The appropriate layout segment is referenced and returned to the caller.
+ */
+struct pnfs_layout_segment *
+pnfs_update_layout(struct inode *ino,
+                  struct nfs_open_context *ctx,
+                  enum pnfs_iomode iomode)
+{
+       struct nfs_inode *nfsi = NFS_I(ino);
+       struct pnfs_layout_hdr *lo;
+       struct pnfs_layout_segment *lseg = NULL;
+
+       if (!pnfs_enabled_sb(NFS_SERVER(ino)))
+               return NULL;
+       spin_lock(&ino->i_lock);
+       lo = pnfs_find_alloc_layout(ino);
+       if (lo == NULL) {
+               dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
+               goto out_unlock;
+       }
+
+       /* Check to see if the layout for the given range already exists */
+       lseg = pnfs_has_layout(lo, iomode);
+       if (lseg) {
+               dprintk("%s: Using cached lseg %p for iomode %d)\n",
+                       __func__, lseg, iomode);
+               goto out_unlock;
+       }
+
+       /* if LAYOUTGET already failed once we don't try again */
+       if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state))
+               goto out_unlock;
+
+       get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */
+       spin_unlock(&ino->i_lock);
+
+       lseg = send_layoutget(lo, ctx, iomode);
+out:
+       dprintk("%s end, state 0x%lx lseg %p\n", __func__,
+               nfsi->layout->state, lseg);
+       return lseg;
+out_unlock:
+       spin_unlock(&ino->i_lock);
+       goto out;
+}
+
+int
+pnfs_layout_process(struct nfs4_layoutget *lgp)
+{
+       struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
+       struct nfs4_layoutget_res *res = &lgp->res;
+       struct pnfs_layout_segment *lseg;
+       struct inode *ino = lo->inode;
+       int status = 0;
+
+       /* Inject layout blob into I/O device driver */
+       lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
+       if (!lseg || IS_ERR(lseg)) {
+               if (!lseg)
+                       status = -ENOMEM;
+               else
+                       status = PTR_ERR(lseg);
+               dprintk("%s: Could not allocate layout: error %d\n",
+                      __func__, status);
+               goto out;
+       }
+
+       spin_lock(&ino->i_lock);
+       init_lseg(lo, lseg);
+       lseg->range = res->range;
+       *lgp->lsegpp = lseg;
+       pnfs_insert_layout(lo, lseg);
+
+       /* Done processing layoutget. Set the layout stateid */
+       pnfs_set_layout_stateid(lo, &res->stateid);
+       spin_unlock(&ino->i_lock);
+out:
+       return status;
+}
+
+/*
+ * Device ID cache. Currently supports one layout type per struct nfs_client.
+ * Add the layout type to the lookup key to support multiple types.
+ */
+int
+pnfs_alloc_init_deviceid_cache(struct nfs_client *clp,
+                        void (*free_callback)(struct pnfs_deviceid_node *))
+{
+       struct pnfs_deviceid_cache *c;
+
+       c = kzalloc(sizeof(struct pnfs_deviceid_cache), GFP_KERNEL);
+       if (!c)
+               return -ENOMEM;
+       spin_lock(&clp->cl_lock);
+       if (clp->cl_devid_cache != NULL) {
+               atomic_inc(&clp->cl_devid_cache->dc_ref);
+               dprintk("%s [kref [%d]]\n", __func__,
+                       atomic_read(&clp->cl_devid_cache->dc_ref));
+               kfree(c);
+       } else {
+               /* kzalloc initializes hlists */
+               spin_lock_init(&c->dc_lock);
+               atomic_set(&c->dc_ref, 1);
+               c->dc_free_callback = free_callback;
+               clp->cl_devid_cache = c;
+               dprintk("%s [new]\n", __func__);
+       }
+       spin_unlock(&clp->cl_lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_alloc_init_deviceid_cache);
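
A layout driver would typically create or reference this cache from its
mount hook and drop it at umount. A hedged sketch of that lifecycle,
assuming kernel context; struct example_deviceid and the example_*
functions are hypothetical driver code (these are the mount hooks
referenced in the registration sketch earlier):

struct example_deviceid {
	struct pnfs_deviceid_node node;	/* embed the generic node */
	/* ...driver-private device addressing state... */
};

static void example_free_deviceid(struct pnfs_deviceid_node *devid)
{
	kfree(container_of(devid, struct example_deviceid, node));
}

static int example_set_layoutdriver(struct nfs_server *server)
{
	/* first mount creates the per-nfs_client cache,
	 * later mounts just take a reference */
	return pnfs_alloc_init_deviceid_cache(server->nfs_client,
					      example_free_deviceid);
}

static int example_clear_layoutdriver(struct nfs_server *server)
{
	/* dropping the last reference frees the (verified empty) cache */
	pnfs_put_deviceid_cache(server->nfs_client);
	return 0;
}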
+
+/*
+ * Called from pnfs_layoutdriver_type->free_lseg;
+ * the last layout segment reference frees the deviceid
+ */
+void
+pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
+                 struct pnfs_deviceid_node *devid)
+{
+       struct nfs4_deviceid *id = &devid->de_id;
+       struct pnfs_deviceid_node *d;
+       struct hlist_node *n;
+       long h = nfs4_deviceid_hash(id);
+
+       dprintk("%s [%d]\n", __func__, atomic_read(&devid->de_ref));
+       if (!atomic_dec_and_lock(&devid->de_ref, &c->dc_lock))
+               return;
+
+       hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[h], de_node)
+               if (!memcmp(&d->de_id, id, sizeof(*id))) {
+                       hlist_del_rcu(&d->de_node);
+                       spin_unlock(&c->dc_lock);
+                       synchronize_rcu();
+                       c->dc_free_callback(devid);
+                       return;
+               }
+       spin_unlock(&c->dc_lock);
+       /* Why wasn't it found in the list? */
+       BUG();
+}
+EXPORT_SYMBOL_GPL(pnfs_put_deviceid);
+
+/* Find and reference a deviceid */
+struct pnfs_deviceid_node *
+pnfs_find_get_deviceid(struct pnfs_deviceid_cache *c, struct nfs4_deviceid *id)
+{
+       struct pnfs_deviceid_node *d;
+       struct hlist_node *n;
+       long hash = nfs4_deviceid_hash(id);
+
+       dprintk("--> %s hash %ld\n", __func__, hash);
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[hash], de_node) {
+               if (!memcmp(&d->de_id, id, sizeof(*id))) {
+                       if (!atomic_inc_not_zero(&d->de_ref))
+                               goto fail;
+                       rcu_read_unlock();
+                       return d;
+               }
+       }
+fail:
+       rcu_read_unlock();
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(pnfs_find_get_deviceid);
+
+/*
+ * Add a deviceid to the cache.
+ * GETDEVICEINFOs for the same deviceid can race; if the deviceid is
+ * already present, the new one is discarded.
+ */
+struct pnfs_deviceid_node *
+pnfs_add_deviceid(struct pnfs_deviceid_cache *c, struct pnfs_deviceid_node *new)
+{
+       struct pnfs_deviceid_node *d;
+       long hash = nfs4_deviceid_hash(&new->de_id);
+
+       dprintk("--> %s hash %ld\n", __func__, hash);
+       spin_lock(&c->dc_lock);
+       d = pnfs_find_get_deviceid(c, &new->de_id);
+       if (d) {
+               spin_unlock(&c->dc_lock);
+               dprintk("%s [discard]\n", __func__);
+               c->dc_free_callback(new);
+               return d;
+       }
+       INIT_HLIST_NODE(&new->de_node);
+       atomic_set(&new->de_ref, 1);
+       hlist_add_head_rcu(&new->de_node, &c->dc_deviceids[hash]);
+       spin_unlock(&c->dc_lock);
+       dprintk("%s [new]\n", __func__);
+       return new;
+}
+EXPORT_SYMBOL_GPL(pnfs_add_deviceid);
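
Putting the helpers together, a driver decoding a layout looks the
deviceid up first and only issues GETDEVICEINFO on a miss;
pnfs_add_deviceid() then arbitrates between two concurrent misses. A
hedged sketch, kernel context assumed, where example_getdeviceinfo()
stands in for the driver's hypothetical GETDEVICEINFO rpc wrapper and
struct example_deviceid comes from the lifecycle sketch above:

static struct pnfs_deviceid_node *
example_find_or_add_deviceid(struct pnfs_deviceid_cache *c,
			     struct nfs4_deviceid *id)
{
	struct pnfs_deviceid_node *d;
	struct example_deviceid *new;

	d = pnfs_find_get_deviceid(c, id);	/* fast path: cache hit */
	if (d)
		return d;

	new = example_getdeviceinfo(id);	/* miss: ask the server */
	if (!new)
		return NULL;

	/* if another thread added the same id first, pnfs_add_deviceid()
	 * frees our copy via dc_free_callback and returns the winner,
	 * already referenced */
	return pnfs_add_deviceid(c, &new->node);
}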
+
+void
+pnfs_put_deviceid_cache(struct nfs_client *clp)
+{
+       struct pnfs_deviceid_cache *local = clp->cl_devid_cache;
+
+       dprintk("--> %s cl_devid_cache %p\n", __func__, clp->cl_devid_cache);
+       if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
+               int i;
+               /* Verify cache is empty */
+               for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++)
+                       BUG_ON(!hlist_empty(&local->dc_deviceids[i]));
+               clp->cl_devid_cache = NULL;
+               spin_unlock(&clp->cl_lock);
+               kfree(local);
+       }
+}
+EXPORT_SYMBOL_GPL(pnfs_put_deviceid_cache);