/*
 * fs/nfs/nfs4state.c
 *
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model. For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);

void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
	if (server->mnt_path) {
		kfree(server->mnt_path);
		server->mnt_path = NULL;
	}
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memset(clp, 0, sizeof(*clp));
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	init_waitqueue_head(&clp->cl_waitq);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_rpcclient = ERR_PTR(-EINVAL);
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_OK;
	return clp;
}

static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (!IS_ERR(clp->cl_rpcclient))
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}

static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}

struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}

void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}
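
/*
 * Usage sketch (illustrative only, not code from this file): callers that
 * set up a superblock are expected to pair nfs4_get_client() with
 * nfs4_put_client(), so the client structure and its callback channel are
 * torn down when the last reference goes away.  The surrounding code below
 * is hypothetical:
 *
 *	clp = nfs4_get_client(&server_addr);
 *	if (clp == NULL)
 *		return -ENOMEM;
 *	... attach the server to clp->cl_superblocks and use clp ...
 *	nfs4_put_client(clp);
 */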

static int __nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
	return nfs4_map_errors(__nfs4_init_client(clp));
}

u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 *
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	if (new)
		kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}
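
/*
 * Calling-convention sketch (an assumption drawn from the comments above,
 * not code taken from this file): the OPEN path is expected to take
 * clp->cl_sem for reading around the state owner lookup so that reboot
 * recovery cannot run concurrently:
 *
 *	down_read(&clp->cl_sem);
 *	sp = nfs4_get_state_owner(server, cred);
 *	... perform the OPEN using sp ...
 *	nfs4_put_state_owner(sp);
 *	up_read(&clp->cl_sem);
 */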

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->state = 0;
	state->nreaders = 0;
	state->nwriters = 0;
	state->flags = 0;
	memset(state->stateid.data, 0, sizeof(state->stateid.data));
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}

static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	mode &= (FMODE_READ|FMODE_WRITE);
	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner->so_cred != cred)
			continue;
		if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
			continue;
		if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
			continue;
		if ((state->state & mode) != mode)
			continue;
		atomic_inc(&state->count);
		if (mode & FMODE_READ)
			state->nreaders++;
		if (mode & FMODE_WRITE)
			state->nwriters++;
		return state;
	}
	return NULL;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->nreaders == 0 && state->nwriters == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs4_state *state;

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state(inode, cred, mode);
	spin_unlock(&inode->i_lock);
	return state;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
		return;
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	spin_unlock(&inode->i_lock);
	list_del(&state->open_states);
	iput(inode);
	BUG_ON (state->state != 0);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	int newstate;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&inode->i_lock);
	if (mode & FMODE_READ)
		state->nreaders--;
	if (mode & FMODE_WRITE)
		state->nwriters--;
	if (state->nwriters == 0) {
		if (state->nreaders == 0)
			list_del_init(&state->inode_states);
		/* See reclaim code */
		list_move_tail(&state->open_states, &owner->so_states);
	}
	spin_unlock(&inode->i_lock);
	newstate = 0;
	if (state->state != 0) {
		if (state->nreaders)
			newstate |= FMODE_READ;
		if (state->nwriters)
			newstate |= FMODE_WRITE;
		if (state->state == newstate)
			goto out;
		if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
			state->state = newstate;
			goto out;
		}
		if (nfs4_do_close(inode, state, newstate) == 0)
			return;
	}
out:
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
}
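
/*
 * Lifecycle sketch (hedged; the caller shown here is a placeholder, not a
 * function in this file): an open context obtains its nfs4_state via
 * nfs4_get_open_state(), the OPEN reply fills in state->stateid and the
 * read/write share counts, and the matching close drops that share again:
 *
 *	state = nfs4_get_open_state(inode, sp);
 *	... OPEN succeeds; the caller records the mode in state->state
 *	    and bumps state->nreaders/nwriters accordingly ...
 *	nfs4_close_state(state, mode);
 */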

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate a new lock_state structure for the given open state and
 * lock owner.  The returned structure carries no server-confirmed
 * stateid yet (NFS_LOCK_INITIALIZED is not set).
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for(;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
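
/*
 * Hedged example of how a read/write request would pick its stateid (the
 * ctx name is illustrative, not defined here): nfs4_copy_stateid() starts
 * from the open stateid and overrides it with the lock stateid when the
 * given lock owner already holds an initialized byte-range lock:
 *
 *	nfs4_stateid stateid;
 *	nfs4_copy_stateid(&stateid, ctx->state, current->files);
 */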

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct rpc_sequence *sequence = counter->sequence;
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		new->task = NULL;
		spin_lock(&sequence->lock);
		list_add_tail(&new->list, &sequence->list);
		spin_unlock(&sequence->lock);
	}
	return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	struct rpc_task *next = NULL;

	spin_lock(&sequence->lock);
	list_del(&seqid->list);
	if (!list_empty(&sequence->list)) {
		next = list_entry(sequence->list.next, struct nfs_seqid, list)->task;
		if (next)
			rpc_wake_up_task(next);
	}
	spin_unlock(&sequence->lock);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	};
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	return nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	return nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {
		seqid->task = task;
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	}
	spin_unlock(&sequence->lock);
	return status;
}
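
/*
 * Serialization sketch for the seqid machinery (the RPC glue shown around
 * it is assumed, not taken from this file): an OPEN-family or LOCK-family
 * call allocates a seqid, queues behind earlier users of the same counter,
 * and only bumps the counter once the reply has been seen:
 *
 *	seqid = nfs_alloc_seqid(&sp->so_seqid);
 *	if (nfs_wait_on_sequence(seqid, task) != 0)
 *		return;			(the task sleeps and is retried later)
 *	... send the OPEN, receive status ...
 *	nfs_increment_open_seqid(status, seqid);
 *	nfs_free_seqid(seqid);
 */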

static int reclaimer(void *);
struct reclaimer_args {
	struct nfs4_client *clp;
	struct completion complete;
};

/*
 * State recovery routine
 */
void
nfs4_recover_state(void *data)
{
	struct nfs4_client *clp = (struct nfs4_client *)data;
	struct reclaimer_args args = {
		.clp = clp,
	};
	might_sleep();

	init_completion(&args.complete);

	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
		goto out_failed_clear;
	wait_for_completion(&args.complete);
	return;
out_failed_clear:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
		schedule_work(&clp->cl_recoverd);
}
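
/*
 * Error-path sketch (hypothetical caller, not part of this file): an RPC
 * error handler that sees NFS4ERR_STALE_CLIENTID or NFS4ERR_EXPIRED would
 * kick the recoverd work queue via nfs4_schedule_state_recovery() and wait
 * for NFS4CLNT_OK to be set again before retrying the request:
 *
 *	case -NFS4ERR_STALE_CLIENTID:
 *	case -NFS4ERR_EXPIRED:
 *		nfs4_schedule_state_recovery(clp);
 *		... sleep on clp->cl_waitq until NFS4CLNT_OK is set ...
 *		break;
 */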

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_owner, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -ENOENT:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
	}
}

static int reclaimer(void *ptr)
{
	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
	struct nfs4_client *clp = args->clp;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	int status = 0;

	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
	allow_signal(SIGKILL);

	atomic_inc(&clp->cl_count);
	complete(&args->complete);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	status = nfs4_proc_renew(clp);
	switch (status) {
		case 0:
		case -NFS4ERR_CB_PATH_DOWN:
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			ops = &nfs4_reboot_recovery_ops;
			break;
		default:
			ops = &nfs4_network_partition_recovery_ops;
	};
	nfs4_state_mark_reclaim(clp);
	status = __nfs4_init_client(clp);
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	up_write(&clp->cl_sem);
	unlock_kernel();
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_put_client(clp);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */