/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "ast.h"
#include "dir.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "recover.h"
#include "requestqueue.h"
#include "user.h"

static int			ls_count;
static struct mutex		ls_lock;
static struct list_head		lslist;
static spinlock_t		lslist_lock;
static struct task_struct *	scand_task;


static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ssize_t ret = len;
	int n = simple_strtol(buf, NULL, 0);

	ls = dlm_find_lockspace_local(ls->ls_local_handle);
	if (!ls)
		return -EINVAL;

	switch (n) {
	case 0:
		dlm_ls_stop(ls);
		break;
	case 1:
		dlm_ls_start(ls);
		break;
	default:
		ret = -EINVAL;
	}
	dlm_put_lockspace(ls);
	return ret;
}

static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
	set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
	wake_up(&ls->ls_uevent_wait);
	return len;
}

static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
}

static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_global_id = simple_strtoul(buf, NULL, 0);
	return len;
}

static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
{
	uint32_t status = dlm_recover_status(ls);
	return snprintf(buf, PAGE_SIZE, "%x\n", status);
}

static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
}

struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *, char *);
	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};

static struct dlm_attr dlm_attr_control = {
	.attr = {.name = "control", .mode = S_IWUSR},
	.store = dlm_control_store
};

static struct dlm_attr dlm_attr_event = {
	.attr = {.name = "event_done", .mode = S_IWUSR},
	.store = dlm_event_store
};

static struct dlm_attr dlm_attr_id = {
	.attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
	.show = dlm_id_show,
	.store = dlm_id_store
};

static struct dlm_attr dlm_attr_recover_status = {
	.attr = {.name = "recover_status", .mode = S_IRUGO},
	.show = dlm_recover_status_show
};

static struct dlm_attr dlm_attr_recover_nodeid = {
	.attr = {.name = "recover_nodeid", .mode = S_IRUGO},
	.show = dlm_recover_nodeid_show
};

static struct attribute *dlm_attrs[] = {
	&dlm_attr_control.attr,
	&dlm_attr_event.attr,
	&dlm_attr_id.attr,
	&dlm_attr_recover_status.attr,
	&dlm_attr_recover_nodeid.attr,
	NULL,
};

static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->show ? a->show(ls, buf) : 0;
}

static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t len)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->store ? a->store(ls, buf, len) : len;
}

static void lockspace_kobj_release(struct kobject *k)
{
	struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
	kfree(ls);
}

static const struct sysfs_ops dlm_attr_ops = {
	.show = dlm_attr_show,
	.store = dlm_attr_store,
};

static struct kobj_type dlm_ktype = {
	.default_attrs = dlm_attrs,
	.sysfs_ops = &dlm_attr_ops,
	.release = lockspace_kobj_release,
};

static struct kset *dlm_kset;

static int do_uevent(struct dlm_ls *ls, int in)
{
	int error;

	if (in)
		kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
	else
		kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);

	log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving");

	/* dlm_controld will see the uevent, do the necessary group management
	   and then write to sysfs to wake us */

	error = wait_event_interruptible(ls->ls_uevent_wait,
			test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));

	log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result);

	if (error)
		goto out;

	error = ls->ls_uevent_result;
 out:
	if (error)
		log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
			  error, ls->ls_uevent_result);
	return error;
}

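/*
 * Example of the userspace side of this handshake (an illustrative sketch):
 * dlm_controld typically reacts to the KOBJ_ONLINE uevent above, performs
 * the group join, and then reports the result through the "event_done"
 * sysfs attribute, roughly:
 *
 *	echo 0 > /sys/kernel/dlm/<lockspace-name>/event_done
 *
 * That write is handled by dlm_event_store(), which records ls_uevent_result,
 * sets LSFL_UEVENT_WAIT and wakes the waiter above.  The daemon-side command
 * is an assumption; only the sysfs layout (the "dlm" kset created under
 * kernel_kobj in dlm_lockspace_init()) follows from this file.
 */
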
static int dlm_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);

	add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
	return 0;
}

static struct kset_uevent_ops dlm_uevent_ops = {
	.uevent = dlm_uevent,
};

int __init dlm_lockspace_init(void)
{
	ls_count = 0;
	mutex_init(&ls_lock);
	INIT_LIST_HEAD(&lslist);
	spin_lock_init(&lslist_lock);

	dlm_kset = kset_create_and_add("dlm", &dlm_uevent_ops, kernel_kobj);
	if (!dlm_kset) {
		printk(KERN_WARNING "%s: cannot create kset\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

void dlm_lockspace_exit(void)
{
	kset_unregister(dlm_kset);
}

static struct dlm_ls *find_ls_to_scan(void)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (time_after_eq(jiffies, ls->ls_scan_time +
					   dlm_config.ci_scan_secs * HZ)) {
			spin_unlock(&lslist_lock);
			return ls;
		}
	}
	spin_unlock(&lslist_lock);
	return NULL;
}

static int dlm_scand(void *data)
{
	struct dlm_ls *ls;
	int timeout_jiffies = dlm_config.ci_scan_secs * HZ;

	while (!kthread_should_stop()) {
		ls = find_ls_to_scan();
		if (ls) {
			if (dlm_lock_recovery_try(ls)) {
				ls->ls_scan_time = jiffies;
				dlm_scan_rsbs(ls);
				dlm_scan_timeout(ls);
				dlm_unlock_recovery(ls);
			} else {
				ls->ls_scan_time += HZ;
			}
		} else {
			schedule_timeout_interruptible(timeout_jiffies);
		}
	}
	return 0;
}

static int dlm_scand_start(void)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_scand, NULL, "dlm_scand");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		scand_task = p;
	return error;
}

static void dlm_scand_stop(void)
{
	kthread_stop(scand_task);
}

struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);

	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_global_id == id) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_local_handle == lockspace) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_device(int minor)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_device.minor == minor) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

void dlm_put_lockspace(struct dlm_ls *ls)
{
	spin_lock(&lslist_lock);
	ls->ls_count--;
	spin_unlock(&lslist_lock);
}

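/*
 * Usage note (illustrative sketch): each successful dlm_find_lockspace_*()
 * call above bumps ls_count and must be balanced by dlm_put_lockspace(),
 * since remove_lockspace() below spins until ls_count drops to zero.
 * A hypothetical caller holding a global id would do something like:
 *
 *	struct dlm_ls *ls = dlm_find_lockspace_global(id);
 *	if (ls) {
 *		... use ls ...
 *		dlm_put_lockspace(ls);
 *	}
 */
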
static void remove_lockspace(struct dlm_ls *ls)
{
	for (;;) {
		spin_lock(&lslist_lock);
		if (ls->ls_count == 0) {
			WARN_ON(ls->ls_create_count != 0);
			list_del(&ls->ls_list);
			spin_unlock(&lslist_lock);
			return;
		}
		spin_unlock(&lslist_lock);
		ssleep(1);
	}
}

static int threads_start(void)
{
	int error;

	/* Thread which processes lock requests for all lockspaces */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		goto fail;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto astd_fail;
	}

	/* Thread for sending/receiving messages for all lockspaces */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 astd_fail:
	dlm_astd_stop();
 fail:
	return error;
}

static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}

static int new_lockspace(const char *name, int namelen, void **lockspace,
			 uint32_t flags, int lvblen)
{
	struct dlm_ls *ls;
	int i, size, error;
	int do_unreg = 0;

	if (namelen > DLM_LOCKSPACE_LEN)
		return -EINVAL;

	if (!lvblen || (lvblen % 8))
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	if (!dlm_user_daemon_available()) {
		module_put(THIS_MODULE);
		return -EUNATCH;
	}

	error = 0;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		WARN_ON(ls->ls_create_count <= 0);
		if (ls->ls_namelen != namelen)
			continue;
		if (memcmp(ls->ls_name, name, namelen))
			continue;
		if (flags & DLM_LSFL_NEWEXCL) {
			error = -EEXIST;
			break;
		}
		ls->ls_create_count++;
		*lockspace = ls;
		error = 1;
		break;
	}
	spin_unlock(&lslist_lock);

	if (error)
		goto out;

	error = -ENOMEM;

	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
	if (!ls)
		goto out;
	memcpy(ls->ls_name, name, namelen);
	ls->ls_namelen = namelen;
	ls->ls_lvblen = lvblen;
	ls->ls_count = 0;
	ls->ls_flags = 0;
	ls->ls_scan_time = jiffies;

	if (flags & DLM_LSFL_TIMEWARN)
		set_bit(LSFL_TIMEWARN, &ls->ls_flags);

	/* ls_exflags are forced to match among nodes, and we don't
	   need to require all nodes to have some flags set */
	ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
				    DLM_LSFL_NEWEXCL));

	size = dlm_config.ci_rsbtbl_size;
	ls->ls_rsbtbl_size = size;

	ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_NOFS);
	if (!ls->ls_rsbtbl)
		goto out_lsfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
		spin_lock_init(&ls->ls_rsbtbl[i].lock);
	}

	size = dlm_config.ci_lkbtbl_size;
	ls->ls_lkbtbl_size = size;

	ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_NOFS);
	if (!ls->ls_lkbtbl)
		goto out_rsbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
		rwlock_init(&ls->ls_lkbtbl[i].lock);
		ls->ls_lkbtbl[i].counter = 1;
	}

	size = dlm_config.ci_dirtbl_size;
	ls->ls_dirtbl_size = size;

	ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_NOFS);
	if (!ls->ls_dirtbl)
		goto out_lkbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
		spin_lock_init(&ls->ls_dirtbl[i].lock);
	}

	INIT_LIST_HEAD(&ls->ls_waiters);
	mutex_init(&ls->ls_waiters_mutex);
	INIT_LIST_HEAD(&ls->ls_orphans);
	mutex_init(&ls->ls_orphans_mutex);
	INIT_LIST_HEAD(&ls->ls_timeout);
	mutex_init(&ls->ls_timeout_mutex);

	INIT_LIST_HEAD(&ls->ls_nodes);
	INIT_LIST_HEAD(&ls->ls_nodes_gone);
	ls->ls_num_nodes = 0;
	ls->ls_low_nodeid = 0;
	ls->ls_total_weight = 0;
	ls->ls_node_array = NULL;

	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
	ls->ls_stub_rsb.res_ls = ls;

	ls->ls_debug_rsb_dentry = NULL;
	ls->ls_debug_waiters_dentry = NULL;

	init_waitqueue_head(&ls->ls_uevent_wait);
	ls->ls_uevent_result = 0;
	init_completion(&ls->ls_members_done);
	ls->ls_members_result = -1;

	ls->ls_recoverd_task = NULL;
	mutex_init(&ls->ls_recoverd_active);
	spin_lock_init(&ls->ls_recover_lock);
	spin_lock_init(&ls->ls_rcom_spin);
	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
	ls->ls_recover_status = 0;
	ls->ls_recover_seq = 0;
	ls->ls_recover_args = NULL;
	init_rwsem(&ls->ls_in_recovery);
	init_rwsem(&ls->ls_recv_active);
	INIT_LIST_HEAD(&ls->ls_requestqueue);
	mutex_init(&ls->ls_requestqueue_mutex);
	mutex_init(&ls->ls_clear_proc_locks);

	ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
	if (!ls->ls_recover_buf)
		goto out_dirfree;

	INIT_LIST_HEAD(&ls->ls_recover_list);
	spin_lock_init(&ls->ls_recover_list_lock);
	ls->ls_recover_list_count = 0;
	ls->ls_local_handle = ls;
	init_waitqueue_head(&ls->ls_wait_general);
	INIT_LIST_HEAD(&ls->ls_root_list);
	init_rwsem(&ls->ls_root_sem);

	down_write(&ls->ls_in_recovery);

	spin_lock(&lslist_lock);
	ls->ls_create_count = 1;
	list_add(&ls->ls_list, &lslist);
	spin_unlock(&lslist_lock);

	/* needs to find ls in lslist */
	error = dlm_recoverd_start(ls);
	if (error) {
		log_error(ls, "can't start dlm_recoverd %d", error);
		goto out_delist;
	}

	ls->ls_kobj.kset = dlm_kset;
	error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
				     "%s", ls->ls_name);
	if (error)
		goto out_stop;
	kobject_uevent(&ls->ls_kobj, KOBJ_ADD);

	/* let kobject handle freeing of ls if there's an error */
	do_unreg = 1;

	/* This uevent triggers dlm_controld in userspace to add us to the
	   group of nodes that are members of this lockspace (managed by the
	   cluster infrastructure.)  Once it's done that, it tells us who the
	   current lockspace members are (via configfs) and then tells the
	   lockspace to start running (via sysfs) in dlm_ls_start(). */

	error = do_uevent(ls, 1);
	if (error)
		goto out_stop;

	wait_for_completion(&ls->ls_members_done);
	error = ls->ls_members_result;
	if (error)
		goto out_members;

	dlm_create_debug_file(ls);

	log_debug(ls, "join complete");
	*lockspace = ls;
	return 0;

 out_members:
	do_uevent(ls, 0);
	dlm_clear_members(ls);
	kfree(ls->ls_node_array);
 out_stop:
	dlm_recoverd_stop(ls);
 out_delist:
	spin_lock(&lslist_lock);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
	kfree(ls->ls_recover_buf);
 out_dirfree:
	kfree(ls->ls_dirtbl);
 out_lkbfree:
	kfree(ls->ls_lkbtbl);
 out_rsbfree:
	kfree(ls->ls_rsbtbl);
 out_lsfree:
	if (do_unreg)
		kobject_put(&ls->ls_kobj);
	else
		kfree(ls);
 out:
	module_put(THIS_MODULE);
	return error;
}

int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
		      uint32_t flags, int lvblen)
{
	int error = 0;

	mutex_lock(&ls_lock);
	if (!ls_count)
		error = threads_start();
	if (error)
		goto out;

	error = new_lockspace(name, namelen, lockspace, flags, lvblen);
	if (!error)
		ls_count++;
	if (error > 0)
		error = 0;
	if (!ls_count)
		threads_stop();
 out:
	mutex_unlock(&ls_lock);
	return error;
}

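/*
 * Usage sketch (illustrative; the lockspace name and LVB length are made-up
 * values): an in-kernel user such as a cluster filesystem might create and
 * later drop a lockspace roughly like this.  lvblen just has to be a
 * non-zero multiple of 8, and force 0 refuses to destroy a lockspace that
 * still holds LKBs (see dlm_release_lockspace() below).
 *
 *	void *ls;
 *	int error;
 *
 *	error = dlm_new_lockspace("examplefs", strlen("examplefs"), &ls,
 *				  DLM_LSFL_FS, 32);
 *	if (!error) {
 *		... request and release locks in the lockspace ...
 *		dlm_release_lockspace(ls, 0);
 *	}
 */
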
/* Return 1 if the lockspace still has active remote locks,
 * 2 if the lockspace still has active local locks.
 */
static int lockspace_busy(struct dlm_ls *ls)
{
	int i, lkb_found = 0;
	struct dlm_lkb *lkb;

	/* NOTE: We check the lockidtbl here rather than the resource table.
	   This is because there may be LKBs queued as ASTs that have been
	   unlinked from their RSBs and are pending deletion once the AST has
	   been delivered */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		read_lock(&ls->ls_lkbtbl[i].lock);
		if (!list_empty(&ls->ls_lkbtbl[i].list)) {
			lkb_found = 1;
			list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
					    lkb_idtbl_list) {
				if (!lkb->lkb_nodeid) {
					read_unlock(&ls->ls_lkbtbl[i].lock);
					return 2;
				}
			}
		}
		read_unlock(&ls->ls_lkbtbl[i].lock);
	}
	return lkb_found;
}

static int release_lockspace(struct dlm_ls *ls, int force)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *rsb;
	struct list_head *head;
	int i, busy, rv;

	busy = lockspace_busy(ls);

	spin_lock(&lslist_lock);
	if (ls->ls_create_count == 1) {
		if (busy > force)
			rv = -EBUSY;
		else {
			/* remove_lockspace takes ls off lslist */
			ls->ls_create_count = 0;
			rv = 0;
		}
	} else if (ls->ls_create_count > 1) {
		rv = --ls->ls_create_count;
	} else {
		rv = -EINVAL;
	}
	spin_unlock(&lslist_lock);

	if (rv) {
		log_debug(ls, "release_lockspace no remove %d", rv);
		return rv;
	}

	dlm_device_deregister(ls);

	if (force < 3 && dlm_user_daemon_available())
		do_uevent(ls, 0);

	dlm_recoverd_stop(ls);

	remove_lockspace(ls);

	dlm_delete_debug_file(ls);

	dlm_astd_suspend();

	kfree(ls->ls_recover_buf);

	/*
	 * Free direntry structs.
	 */

	dlm_dir_clear(ls);
	kfree(ls->ls_dirtbl);

	/*
	 * Free all lkb's on lkbtbl[] lists.
	 */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		head = &ls->ls_lkbtbl[i].list;
		while (!list_empty(head)) {
			lkb = list_entry(head->next, struct dlm_lkb,
					 lkb_idtbl_list);

			list_del(&lkb->lkb_idtbl_list);

			dlm_del_ast(lkb);

			if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
				dlm_free_lvb(lkb->lkb_lvbptr);

			dlm_free_lkb(lkb);
		}
	}
	dlm_astd_resume();

	kfree(ls->ls_lkbtbl);

	/*
	 * Free all rsb's on rsbtbl[] lists
	 */

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		head = &ls->ls_rsbtbl[i].list;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);

			list_del(&rsb->res_hashchain);
			dlm_free_rsb(rsb);
		}

		head = &ls->ls_rsbtbl[i].toss;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);
			list_del(&rsb->res_hashchain);
			dlm_free_rsb(rsb);
		}
	}

	kfree(ls->ls_rsbtbl);

	/*
	 * Free structures on any other lists
	 */

	dlm_purge_requestqueue(ls);
	kfree(ls->ls_recover_args);
	dlm_clear_free_entries(ls);
	dlm_clear_members(ls);
	dlm_clear_members_gone(ls);
	kfree(ls->ls_node_array);
	log_debug(ls, "release_lockspace final free");
	kobject_put(&ls->ls_kobj);
	/* The ls structure will be freed when the kobject is released */

	module_put(THIS_MODULE);
	return 0;
}

/*
 * Called when a system has released all its locks and is not going to use the
 * lockspace any longer.  We free everything we're managing for this lockspace.
 * Remaining nodes will go through the recovery process as if we'd died.  The
 * lockspace must continue to function as usual, participating in recoveries,
 * until this returns.
 *
 * Force has 4 possible values:
 * 0 - don't destroy lockspace if it has any LKBs
 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
 * 2 - destroy lockspace regardless of LKBs
 * 3 - destroy lockspace as part of a forced shutdown
 */

int dlm_release_lockspace(void *lockspace, int force)
{
	struct dlm_ls *ls;
	int error;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;
	dlm_put_lockspace(ls);

	mutex_lock(&ls_lock);
	error = release_lockspace(ls, force);
	if (!error)
		ls_count--;
	if (!ls_count)
		threads_stop();
	mutex_unlock(&ls_lock);

	return error;
}

void dlm_stop_lockspaces(void)
{
	struct dlm_ls *ls;

 restart:
	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (!test_bit(LSFL_RUNNING, &ls->ls_flags))
			continue;
		spin_unlock(&lslist_lock);
		log_error(ls, "no userland control daemon, stopping lockspace");
		dlm_ls_stop(ls);
		goto restart;
	}
	spin_unlock(&lslist_lock);
}