/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/config.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "delegation.h"

static struct nfs_delegation *nfs_alloc_delegation(void)
{
	return kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

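/*
 * Walk the inode's open contexts and ask each one that still relies on
 * the delegation to re-establish its open state with the server via
 * nfs4_open_delegation_recall().  The recall call can sleep, so the
 * i_lock is dropped around it and the list walk is restarted from the
 * beginning each time.
 */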
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx->dentry, state);
		/* Always drop the context reference, even if the recall failed */
		put_nfs_open_context(ctx);
		if (err < 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation on an inode that is being reclaimed
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	/* Take a reference to the new credential before dropping the old one */
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	delegation = nfs_alloc_delegation();
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

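	/*
	 * Only one delegation may be cached per inode.  If one is already
	 * installed, a reply that differs in stateid or delegation type
	 * means the server handed out conflicting delegations, which is
	 * treated as a protocol error below.
	 */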
	spin_lock(&clp->cl_lock);
	if (nfsi->delegation == NULL) {
		list_add(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation = delegation;
		nfsi->delegation_state = delegation->type;
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk("%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr));
			status = -EIO;
		}
	}
	spin_unlock(&clp->cl_lock);
	/* nfs_free_delegation() also releases the credential reference taken above */
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	return status;
}

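/*
 * Revalidate the inode's attributes against the server, then send the
 * DELEGRETURN and free the cached delegation.  The caller is expected
 * to have already unlinked the delegation from the nfs_inode and from
 * the client's delegation list.
 */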
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
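/*
 * filemap_fdatawrite() starts writeback of the mapping's dirty pages,
 * nfs_wb_all() flushes and waits for the outstanding NFS writes, and
 * filemap_fdatawait() waits for any remaining writeback to complete.
 */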
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

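	/*
	 * Flush dirty data first, then hold clp->cl_sem to keep state
	 * recovery out of the way and nfsi->rwsem to block new delegated
	 * opens while the delegation is detached.  The delegation itself
	 * is unhooked under clp->cl_lock and only returned to the server
	 * once all of these locks have been dropped.
	 */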
	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}

/*
 * Return all delegations associated with a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
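	/*
	 * nfs_inode_return_delegation() can sleep, so the delegation list
	 * cannot be traversed while holding clp->cl_lock.  Pin the inode,
	 * drop the lock, return the delegation, then restart the scan
	 * from the top of the list.
	 */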
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
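/*
 * With the callback channel unreachable the server has no way to recall
 * outstanding delegations, so the client returns them all voluntarily.
 */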
void nfs_handle_cb_pathdown(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

struct recall_threadargs {
	struct inode *inode;
	struct nfs4_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

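/*
 * Delegation recall runs in its own kernel thread.  The thread detaches
 * the delegation that matches the recalled stateid, completes
 * args->started so the caller can report success or -ENOENT back to the
 * server, and then recovers the open state and sends the DELEGRETURN
 * without blocking the callback service.
 */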
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
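/*
 * Typically invoked from the NFSv4 callback handlers (e.g. CB_RECALL)
 * to map the file handle supplied by the server back to a delegated
 * inode.  Returns the inode with an extra reference held, or NULL.
 */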
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	spin_unlock(&clp->cl_lock);
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
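/*
 * Any delegation still flagged NFS_DELEGATION_NEED_RECLAIM once reboot
 * recovery has finished was not re-established with the server.  Such
 * delegations are moved to a private list under cl_lock and freed after
 * the lock has been dropped.
 */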
void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation, *n;
	LIST_HEAD(head);
	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		list_move(&delegation->super_list, &head);
		NFS_I(delegation->inode)->delegation = NULL;
		NFS_I(delegation->inode)->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	while (!list_empty(&head)) {
		delegation = list_entry(head.next, struct nfs_delegation, super_list);
		list_del(&delegation->super_list);
		nfs_free_delegation(delegation);
	}
}