]> bbs.cooldavid.org Git - net-next-2.6.git/blame - fs/gfs2/rgrp.c
[GFS2] Remove unused prototype
[net-next-2.6.git] / fs / gfs2 / rgrp.c
CommitLineData
b3b94faa
DT
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <asm/semaphore.h>
16
17#include "gfs2.h"
18#include "bits.h"
19#include "glock.h"
20#include "glops.h"
21#include "jdata.h"
22#include "lops.h"
23#include "meta_io.h"
24#include "quota.h"
25#include "rgrp.h"
26#include "super.h"
27#include "trans.h"
28
/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 * Recounts the blocks in each of the four allocation states directly from
 * the on-disk bitmaps and cross-checks the totals against the counters
 * cached in the rgrp header.  On the first mismatch the filesystem is
 * flagged inconsistent (gfs2_consist_rgrpd) and we return early.
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	uint32_t length = rgd->rd_ri.ri_length;
	uint32_t count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(uint32_t));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_rg.rg_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_rg.rg_free);
		return;
	}

	/* Used (non-dinode) blocks = total data blocks - free - dinodes */
	tmp = rgd->rd_ri.ri_data -
		rgd->rd_rg.rg_free -
		rgd->rd_rg.rg_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	/* State 2 (free metadata) is never used in this scheme; any bit in
	   that state indicates corruption. */
	if (count[2]) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free metadata mismatch: %u != 0\n",
			       count[2]);
		return;
	}

	if (count[3] != rgd->rd_rg.rg_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[3], rgd->rd_rg.rg_dinodes);
		return;
	}
}
87
88static inline int rgrp_contains_block(struct gfs2_rindex *ri, uint64_t block)
89{
90 uint64_t first = ri->ri_data0;
91 uint64_t last = first + ri->ri_data;
92 return !!(first <= block && block < last);
93}
94
/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 *
 * Walks the most-recently-used list of rgrps under sd_rindex_spin and
 * returns the one whose data window contains @blk, promoting it to the
 * head of the MRU list so that lookups for nearby blocks stay fast.
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, uint64_t blk)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
		if (rgrp_contains_block(&rgd->rd_ri, blk)) {
			/* Hit: promote to head of the MRU list. */
			list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
			spin_unlock(&sdp->sd_rindex_spin);
			return rgd;
		}
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}
121
122/**
123 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
124 * @sdp: The GFS2 superblock
125 *
126 * Returns: The first rgrp in the filesystem
127 */
128
129struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
130{
131 gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
132 return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
133}
134
135/**
136 * gfs2_rgrpd_get_next - get the next RG
137 * @rgd: A RG
138 *
139 * Returns: The next rgrp
140 */
141
142struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
143{
144 if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
145 return NULL;
146 return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
147}
148
/* Tear down all in-core rgrp state: empty the "recent" list, then free
   every rgrp descriptor along with its bitmap array and glock reference.
   NOTE(review): assumes callers serialize via sd_rindex_mutex (see
   gfs2_clear_rgrpd/gfs2_ri_update) -- confirm no lookups race with the
   unlocked second loop. */
static void clear_rgrpdi(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	/* Unlink everything from the recent list under the spinlock; the
	   descriptors themselves are freed below. */
	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = NULL;
	head = &sdp->sd_rindex_recent_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
		list_del(&rgd->rd_recent);
	}
	spin_unlock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
		gl = rgd->rd_gl;

		list_del(&rgd->rd_list);
		list_del(&rgd->rd_list_mru);

		if (gl) {
			/* Break the glock -> rgrp back-pointer before
			   dropping our glock reference. */
			set_gl2rgd(gl, NULL);
			gfs2_glock_put(gl);
		}

		kfree(rgd->rd_bits);
		kfree(rgd);
	}
}
181
/* Public entry point for tearing down rgrp state; serializes against
   gfs2_ri_update() via sd_rindex_mutex. */
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	down(&sdp->sd_rindex_mutex);
	clear_rgrpdi(sdp);
	up(&sdp->sd_rindex_mutex);
}
188
189/**
190 * gfs2_compute_bitstructs - Compute the bitmap sizes
191 * @rgd: The resource group descriptor
192 *
193 * Calculates bitmap descriptors, one for each block that contains bitmap data
194 *
195 * Returns: errno
196 */
197
198static int compute_bitstructs(struct gfs2_rgrpd *rgd)
199{
200 struct gfs2_sbd *sdp = rgd->rd_sbd;
201 struct gfs2_bitmap *bi;
202 uint32_t length = rgd->rd_ri.ri_length; /* # blocks in hdr & bitmap */
203 uint32_t bytes_left, bytes;
204 int x;
205
206 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_KERNEL);
207 if (!rgd->rd_bits)
208 return -ENOMEM;
209
210 bytes_left = rgd->rd_ri.ri_bitbytes;
211
212 for (x = 0; x < length; x++) {
213 bi = rgd->rd_bits + x;
214
215 /* small rgrp; bitmap stored completely in header block */
216 if (length == 1) {
217 bytes = bytes_left;
218 bi->bi_offset = sizeof(struct gfs2_rgrp);
219 bi->bi_start = 0;
220 bi->bi_len = bytes;
221 /* header block */
222 } else if (x == 0) {
223 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
224 bi->bi_offset = sizeof(struct gfs2_rgrp);
225 bi->bi_start = 0;
226 bi->bi_len = bytes;
227 /* last block */
228 } else if (x + 1 == length) {
229 bytes = bytes_left;
230 bi->bi_offset = sizeof(struct gfs2_meta_header);
231 bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
232 bi->bi_len = bytes;
233 /* other blocks */
234 } else {
235 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
236 bi->bi_offset = sizeof(struct gfs2_meta_header);
237 bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
238 bi->bi_len = bytes;
239 }
240
241 bytes_left -= bytes;
242 }
243
244 if (bytes_left) {
245 gfs2_consist_rgrpd(rgd);
246 return -EIO;
247 }
248 bi = rgd->rd_bits + (length - 1);
249 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_ri.ri_data) {
250 if (gfs2_consist_rgrpd(rgd)) {
251 gfs2_rindex_print(&rgd->rd_ri);
252 fs_err(sdp, "start=%u len=%u offset=%u\n",
253 bi->bi_start, bi->bi_len, bi->bi_offset);
254 }
255 return -EIO;
256 }
257
258 return 0;
259}
260
/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: the rindex inode
 *
 * Drops all existing in-core rgrp state, then re-reads the rindex file
 * one gfs2_rindex record at a time, building a gfs2_rgrpd (with bitmap
 * descriptors and a glock) for each.  Caller is expected to hold
 * sd_rindex_mutex (see gfs2_rindex_hold).
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd;
	char buf[sizeof(struct gfs2_rindex)];
	uint64_t junk = ip->i_di.di_size;
	int error;

	/* The rindex file must be a whole number of records. */
	if (do_div(junk, sizeof(struct gfs2_rindex))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	clear_rgrpdi(sdp);

	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
		error = gfs2_jdata_read_mem(ip, buf,
					    sdp->sd_rgrps *
					    sizeof(struct gfs2_rindex),
					    sizeof(struct gfs2_rindex));
		/* Zero bytes read means end of the rindex file. */
		if (!error)
			break;
		/* A short or negative read is an error; map short reads
		   to -EIO. */
		if (error != sizeof(struct gfs2_rindex)) {
			if (error > 0)
				error = -EIO;
			goto fail;
		}

		rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_KERNEL);
		error = -ENOMEM;
		if (!rgd)
			goto fail;

		init_MUTEX(&rgd->rd_mutex);
		lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
		rgd->rd_sbd = sdp;

		/* Link into both lists before anything can fail below;
		   the fail path's clear_rgrpdi() frees via these lists. */
		list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
		list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);

		gfs2_rindex_in(&rgd->rd_ri, buf);

		error = compute_bitstructs(rgd);
		if (error)
			goto fail;

		error = gfs2_glock_get(sdp, rgd->rd_ri.ri_addr,
				       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
		if (error)
			goto fail;

		set_gl2rgd(rgd->rd_gl, rgd);
		/* Force the rgrp header to be (re)read on first use. */
		rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
	}

	/* Record which version of the rindex we now mirror. */
	sdp->sd_rindex_vn = ip->i_gl->gl_vn;

	return 0;

 fail:
	clear_rgrpdi(sdp);

	return error;
}
332
/**
 * gfs2_rindex_hold - Grab a lock on the rindex
 * @sdp: The GFS2 superblock
 * @ri_gh: the glock holder
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
{
	struct gfs2_inode *ip = sdp->sd_rindex;
	struct gfs2_glock *gl = ip->i_gl;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
	if (error)
		return error;

	/* Read new copy from disk if we don't have the latest.  The
	   version number is re-checked under the mutex so only one task
	   performs the update (double-checked pattern). */
	if (sdp->sd_rindex_vn != gl->gl_vn) {
		down(&sdp->sd_rindex_mutex);
		if (sdp->sd_rindex_vn != gl->gl_vn) {
			error = gfs2_ri_update(ip);
			/* On failure, drop the glock here so the caller
			   never has to clean up ri_gh. */
			if (error)
				gfs2_glock_dq_uninit(ri_gh);
		}
		up(&sdp->sd_rindex_mutex);
	}

	return error;
}
374
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_ri.ri_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	down(&rgd->rd_mutex);

	/* Fast path: buffers are already held; just bump the refcount. */
	spin_lock(&sdp->sd_rindex_spin);
	if (rgd->rd_bh_count) {
		rgd->rd_bh_count++;
		spin_unlock(&sdp->sd_rindex_spin);
		up(&rgd->rd_mutex);
		return 0;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	/* Phase 1: kick off async reads for every bitmap block. */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_ri.ri_addr + x, DIO_START,
				       &bi->bi_bh);
		if (error)
			goto fail;
	}

	/* Phase 2: wait for completion and type-check each block.  Block
	   0 is the rgrp header (RG); the rest are bitmap blocks (RB). */
	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_reread(sdp, bi->bi_bh, DIO_WAIT);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh,
					(y) ? GFS2_METATYPE_RB :
					GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	/* Refresh the in-core rgrp header if the glock version moved. */
	if (rgd->rd_rg_vn != gl->gl_vn) {
		gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_rg_vn = gl->gl_vn;
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_rg.rg_free;
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);

	up(&rgd->rd_mutex);

	return 0;

	/* Release the x buffers successfully started in phase 1.  If the
	   failure came from phase 2, x == length and all are released. */
 fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}
	up(&rgd->rd_mutex);

	return error;
}
451
452void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
453{
454 struct gfs2_sbd *sdp = rgd->rd_sbd;
455
456 spin_lock(&sdp->sd_rindex_spin);
457 gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
458 rgd->rd_bh_count++;
459 spin_unlock(&sdp->sd_rindex_spin);
460}
461
462/**
463 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
464 * @rgd: the struct gfs2_rgrpd describing the RG to read in
465 *
466 */
467
468void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
469{
470 struct gfs2_sbd *sdp = rgd->rd_sbd;
471 int x, length = rgd->rd_ri.ri_length;
472
473 spin_lock(&sdp->sd_rindex_spin);
474 gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
475 if (--rgd->rd_bh_count) {
476 spin_unlock(&sdp->sd_rindex_spin);
477 return;
478 }
479
480 for (x = 0; x < length; x++) {
481 struct gfs2_bitmap *bi = rgd->rd_bits + x;
482 kfree(bi->bi_clone);
483 bi->bi_clone = NULL;
484 brelse(bi->bi_bh);
485 bi->bi_bh = NULL;
486 }
487
488 spin_unlock(&sdp->sd_rindex_spin);
489}
490
491void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
492{
493 struct gfs2_sbd *sdp = rgd->rd_sbd;
494 unsigned int length = rgd->rd_ri.ri_length;
495 unsigned int x;
496
497 for (x = 0; x < length; x++) {
498 struct gfs2_bitmap *bi = rgd->rd_bits + x;
499 if (!bi->bi_clone)
500 continue;
501 memcpy(bi->bi_clone + bi->bi_offset,
502 bi->bi_bh->b_data + bi->bi_offset,
503 bi->bi_len);
504 }
505
506 spin_lock(&sdp->sd_rindex_spin);
507 rgd->rd_free_clone = rgd->rd_rg.rg_free;
508 spin_unlock(&sdp->sd_rindex_spin);
509}
510
511/**
512 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
513 * @ip: the incore GFS2 inode structure
514 *
515 * Returns: the struct gfs2_alloc
516 */
517
518struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
519{
520 struct gfs2_alloc *al = &ip->i_alloc;
521
522 /* FIXME: Should assert that the correct locks are held here... */
523 memset(al, 0, sizeof(*al));
524 return al;
525}
526
/**
 * gfs2_alloc_put - throw away the struct gfs2_alloc for an inode
 * @ip: the inode
 *
 * Deliberately a no-op: the gfs2_alloc lives inside the inode itself,
 * so there is nothing to free.
 */

void gfs2_alloc_put(struct gfs2_inode *ip)
{
}
537
538/**
539 * try_rgrp_fit - See if a given reservation will fit in a given RG
540 * @rgd: the RG data
541 * @al: the struct gfs2_alloc structure describing the reservation
542 *
543 * If there's room for the requested blocks to be allocated from the RG:
544 * Sets the $al_reserved_data field in @al.
545 * Sets the $al_reserved_meta field in @al.
546 * Sets the $al_rgd field in @al.
547 *
548 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
549 */
550
551static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
552{
553 struct gfs2_sbd *sdp = rgd->rd_sbd;
554 int ret = 0;
555
556 spin_lock(&sdp->sd_rindex_spin);
557 if (rgd->rd_free_clone >= al->al_requested) {
558 al->al_rgd = rgd;
559 ret = 1;
560 }
561 spin_unlock(&sdp->sd_rindex_spin);
562
563 return ret;
564}
565
566/**
567 * recent_rgrp_first - get first RG from "recent" list
568 * @sdp: The GFS2 superblock
569 * @rglast: address of the rgrp used last
570 *
571 * Returns: The first rgrp in the recent list
572 */
573
574static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
575 uint64_t rglast)
576{
577 struct gfs2_rgrpd *rgd = NULL;
578
579 spin_lock(&sdp->sd_rindex_spin);
580
581 if (list_empty(&sdp->sd_rindex_recent_list))
582 goto out;
583
584 if (!rglast)
585 goto first;
586
587 list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
588 if (rgd->rd_ri.ri_addr == rglast)
589 goto out;
590 }
591
592 first:
593 rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
594 rd_recent);
595
596 out:
597 spin_unlock(&sdp->sd_rindex_spin);
598
599 return rgd;
600}
601
/**
 * recent_rgrp_next - get next RG from "recent" list
 * @cur_rgd: current rgrp
 * @remove: if non-zero, also take @cur_rgd off the recent list
 *
 * If @cur_rgd is still on the recent list, return its successor (NULL
 * if it was last).  If @cur_rgd is no longer on the list, fall back to
 * the head of the list.
 *
 * Returns: The next rgrp in the recent list
 */

static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
					   int remove)
{
	struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
	struct list_head *head;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_recent_list;

	list_for_each_entry(rgd, head, rd_recent) {
		if (rgd == cur_rgd) {
			/* Successor, or NULL if cur_rgd is the tail. */
			if (cur_rgd->rd_recent.next != head)
				rgd = list_entry(cur_rgd->rd_recent.next,
						 struct gfs2_rgrpd, rd_recent);
			else
				rgd = NULL;

			/* Remove only after the successor is captured. */
			if (remove)
				list_del(&cur_rgd->rd_recent);

			goto out;
		}
	}

	/* cur_rgd wasn't on the list; restart from the head if any. */
	rgd = NULL;
	if (!list_empty(head))
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);

 out:
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}
645
/**
 * recent_rgrp_add - add an RG to tail of "recent" list
 * @new_rgd: The rgrp to add
 *
 * The list is capped at (total rgrps / number of journals) entries so
 * each node tends to work its own slice of the filesystem.  Duplicates
 * are not added.  NOTE(review): assumes gfs2_jindex_size() is non-zero
 * on a mounted fs -- confirm, else this divides by zero.
 */

static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
{
	struct gfs2_sbd *sdp = new_rgd->rd_sbd;
	struct gfs2_rgrpd *rgd;
	unsigned int count = 0;
	unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
		/* Already present -- nothing to do. */
		if (rgd == new_rgd)
			goto out;

		/* List already at capacity -- don't grow it. */
		if (++count >= max)
			goto out;
	}
	list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);

 out:
	spin_unlock(&sdp->sd_rindex_spin);
}
673
/**
 * forward_rgrp_get - get an rgrp to try next from full list
 * @sdp: The GFS2 superblock
 *
 * If no forward pointer is cached yet, pick a starting rgrp offset
 * proportional to this node's journal id, so different cluster nodes
 * start their full-list scans in different regions and avoid
 * contention.
 *
 * Returns: The rgrp to try next
 */

static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd;
	unsigned int journals = gfs2_jindex_size(sdp);
	unsigned int rg = 0, x;

	spin_lock(&sdp->sd_rindex_spin);

	rgd = sdp->sd_rindex_forward;
	if (!rgd) {
		/* Spread nodes across the rgrps by journal id. */
		if (sdp->sd_rgrps >= journals)
			rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;

		/* Walk forward rg entries from the start of the list. */
		for (x = 0, rgd = gfs2_rgrpd_get_first(sdp);
		     x < rg;
		     x++, rgd = gfs2_rgrpd_get_next(rgd))
			/* Do Nothing */;

		sdp->sd_rindex_forward = rgd;
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}
706
/**
 * forward_rgrp_set - set the forward rgrp pointer
 * @sdp: the filesystem
 * @rgd: The new forward rgrp
 *
 * Records where the next full-list allocation scan should begin.
 */

static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
{
	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = rgd;
	spin_unlock(&sdp->sd_rindex_spin);
}
720
/**
 * get_local_rgrp - Choose and lock a rgrp for allocation
 * @ip: the inode to reserve space for
 *
 * Try to acquire rgrp in way which avoids contending with others.
 * Two phases: first try the "recent" list with trylock semantics;
 * failing that, walk the full rgrp list (starting at the per-node
 * forward pointer), allowing up to two passes -- the second pass
 * blocks on locks that were skipped the first time.
 *
 * On success, al->al_rgd is set and al->al_rgd_gh holds the rgrp
 * glock exclusively.
 *
 * Returns: errno
 */

static int get_local_rgrp(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd, *begin = NULL;
	struct gfs2_alloc *al = &ip->i_alloc;
	int flags = LM_FLAG_TRY;
	int skipped = 0;
	int loops = 0;
	int error;

	/* Try recently successful rgrps */

	rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);

	while (rgd) {
		error = gfs2_glock_nq_init(rgd->rd_gl,
					   LM_ST_EXCLUSIVE, LM_FLAG_TRY,
					   &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			/* Full: drop it from the recent list and move on. */
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			rgd = recent_rgrp_next(rgd, 1);
			break;

		case GLR_TRYFAILED:
			/* Contended: leave it on the list, try the next. */
			rgd = recent_rgrp_next(rgd, 0);
			break;

		default:
			return error;
		}
	}

	/* Go through full list of rgrps */

	begin = rgd = forward_rgrp_get(sdp);

	for (;;) {
		error = gfs2_glock_nq_init(rgd->rd_gl,
					   LM_ST_EXCLUSIVE, flags,
					   &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			break;

		case GLR_TRYFAILED:
			skipped++;
			break;

		default:
			return error;
		}

		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);

		/* Completed a full lap.  Give up after two laps, or after
		   one lap in which nothing was even contended; otherwise
		   retry with blocking locks (flags = 0). */
		if (rgd == begin) {
			if (++loops >= 2 || !skipped)
				return -ENOSPC;
			flags = 0;
		}
	}

 out:
	/* Remember where we allocated, for locality next time. */
	ip->i_last_rg_alloc = rgd->rd_ri.ri_addr;

	/* If we fell through to the full-list scan, publicize the hit and
	   advance the shared forward pointer past it. */
	if (begin) {
		recent_rgrp_add(rgd);
		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);
		forward_rgrp_set(sdp, rgd);
	}

	return 0;
}
813
/**
 * gfs2_inplace_reserve_i - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @file: source file of the caller (for diagnostics)
 * @line: source line of the caller (for diagnostics)
 *
 * Takes the rindex glock (shared) and selects/locks an rgrp with enough
 * free blocks for al_requested.  Paired with gfs2_inplace_release().
 *
 * Returns: errno
 */

int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	int error;

	/* A zero-block reservation is a caller bug. */
	if (gfs2_assert_warn(sdp, al->al_requested))
		return -EINVAL;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		return error;

	error = get_local_rgrp(ip);
	if (error) {
		gfs2_glock_dq_uninit(&al->al_ri_gh);
		return error;
	}

	/* Record the call site for over-allocation diagnostics. */
	al->al_file = file;
	al->al_line = line;

	return 0;
}
845
/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().  Warns (with
 * the reserving call site) if more blocks were allocated than reserved.
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;

	if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
		fs_warn(sdp, "al_alloced = %u, al_requested = %u "
			"al_file = %s, al_line = %u\n",
			al->al_alloced, al->al_requested, al->al_file,
			al->al_line);

	al->al_rgd = NULL;
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	gfs2_glock_dq_uninit(&al->al_ri_gh);
}
868
869/**
870 * gfs2_get_block_type - Check a block in a RG is of given type
871 * @rgd: the resource group holding the block
872 * @block: the block number
873 *
874 * Returns: The block type (GFS2_BLKST_*)
875 */
876
877unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, uint64_t block)
878{
879 struct gfs2_bitmap *bi = NULL;
880 uint32_t length, rgrp_block, buf_block;
881 unsigned int buf;
882 unsigned char type;
883
884 length = rgd->rd_ri.ri_length;
885 rgrp_block = block - rgd->rd_ri.ri_data0;
886
887 for (buf = 0; buf < length; buf++) {
888 bi = rgd->rd_bits + buf;
889 if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
890 break;
891 }
892
893 gfs2_assert(rgd->rd_sbd, buf < length);
894 buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;
895
896 type = gfs2_testbit(rgd,
897 bi->bi_bh->b_data + bi->bi_offset,
898 bi->bi_len, buf_block);
899
900 return type;
901}
902
/**
 * rgblk_search - find a block in @old_state, change allocation
 *           state to @new_state
 * @rgd: the resource group descriptor
 * @goal: the goal block within the RG (start here to search for avail block)
 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 *
 * This function never fails, because we wouldn't call it unless we
 * know (from reservation results, etc.) that a block is available.
 *
 * Scope of @goal and returned block is just within rgrp, not the whole
 * filesystem.
 *
 * Returns: the block number allocated
 */

static uint32_t rgblk_search(struct gfs2_rgrpd *rgd, uint32_t goal,
			     unsigned char old_state, unsigned char new_state)
{
	struct gfs2_bitmap *bi = NULL;
	uint32_t length = rgd->rd_ri.ri_length;
	uint32_t blk = 0;
	unsigned int buf, x;

	/* Find bitmap block that contains bits for goal block */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);

	/* Convert scope of "goal" from rgrp-wide to within found bit block */
	goal -= bi->bi_start * GFS2_NBBY;

	/* Search (up to entire) bitmap in this rgrp for allocatable block.
	   "x <= length", instead of "x < length", because we typically start
	   the search in the middle of a bit block, but if we can't find an
	   allocatable block anywhere else, we want to be able wrap around and
	   search in the first part of our first-searched bit block.  The
	   clone bitmap, when present, is searched instead of the real one so
	   that blocks freed in this transaction aren't re-used yet. */
	for (x = 0; x <= length; x++) {
		if (bi->bi_clone)
			blk = gfs2_bitfit(rgd,
					  bi->bi_clone + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		else
			blk = gfs2_bitfit(rgd,
					  bi->bi_bh->b_data + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		if (blk != BFITNOENT)
			break;

		/* Try next bitmap block (wrap back to rgrp header if at end) */
		buf = (buf + 1) % length;
		bi = rgd->rd_bits + buf;
		goal = 0;
	}

	/* Should never happen: the reservation guaranteed a free block. */
	if (gfs2_assert_withdraw(rgd->rd_sbd, x <= length))
		blk = 0;

	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	gfs2_setbit(rgd,
		    bi->bi_bh->b_data + bi->bi_offset,
		    bi->bi_len, blk, new_state);
	/* Keep the clone in step so the allocation is visible to further
	   searches within this transaction. */
	if (bi->bi_clone)
		gfs2_setbit(rgd,
			    bi->bi_clone + bi->bi_offset,
			    bi->bi_len, blk, new_state);

	/* Translate back to an rgrp-relative block number. */
	return bi->bi_start * GFS2_NBBY + blk;
}
981
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Sets each block's state in the real bitmap only; a clone of each
 * touched bitmap fragment is made first (if not already present) so
 * in-transaction searches still see the old state.
 *
 * Returns: Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, uint64_t bstart,
				     uint32_t blen, unsigned char new_state)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi = NULL;
	uint32_t length, rgrp_blk, buf_blk;
	unsigned int buf;

	rgd = gfs2_blk2rgrpd(sdp, bstart);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", bstart);
		return NULL;
	}

	length = rgd->rd_ri.ri_length;

	/* Work in rgrp-relative block numbers. */
	rgrp_blk = bstart - rgd->rd_ri.ri_data0;

	while (blen--) {
		/* Find the bitmap fragment covering this block. */
		for (buf = 0; buf < length; buf++) {
			bi = rgd->rd_bits + buf;
			if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
				break;
		}

		gfs2_assert(rgd->rd_sbd, buf < length);

		buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
		rgrp_blk++;

		/* Snapshot the fragment before first modification so the
		   clone keeps the pre-free state for this transaction. */
		if (!bi->bi_clone) {
			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
					       GFP_KERNEL | __GFP_NOFAIL);
			memcpy(bi->bi_clone + bi->bi_offset,
			       bi->bi_bh->b_data + bi->bi_offset,
			       bi->bi_len);
		}
		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd,
			    bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_blk, new_state);
	}

	return rgd;
}
1038
/**
 * gfs2_alloc_data - Allocate a data block
 * @ip: the inode to allocate the data block for
 *
 * Allocates one free block from the reserved rgrp (al_rgd), preferring
 * the inode's data goal block for locality, and updates the rgrp
 * header, statfs and quota accounting.
 *
 * Returns: the allocated block
 */

uint64_t gfs2_alloc_data(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	uint32_t goal, blk;
	uint64_t block;

	/* Start searching at the inode's goal if it lies in this rgrp,
	   else at this rgrp's last data allocation. */
	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_data))
		goal = ip->i_di.di_goal_data - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_data;

	blk = rgblk_search(rgd, goal,
			   GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_data = blk;

	block = rgd->rd_ri.ri_data0 + blk;
	/* Next data allocation should continue from here. */
	ip->i_di.di_goal_data = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	/* Journal the updated rgrp header (block 0 of the rgrp). */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
1083
/**
 * gfs2_alloc_meta - Allocate a metadata block
 * @ip: the inode to allocate the metadata block for
 *
 * Like gfs2_alloc_data() but uses the metadata goal, and additionally
 * cancels any pending revoke on the reused block.
 *
 * Returns: the allocated block
 */

uint64_t gfs2_alloc_meta(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	uint32_t goal, blk;
	uint64_t block;

	/* Start searching at the inode's metadata goal if it lies in this
	   rgrp, else at this rgrp's last metadata allocation. */
	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_meta))
		goal = ip->i_di.di_goal_meta - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_meta;

	blk = rgblk_search(rgd, goal,
			   GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_ri.ri_data0 + blk;
	ip->i_di.di_goal_meta = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	/* Journal the updated rgrp header (block 0 of the rgrp). */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
	/* Reusing a freed metadata block: drop any queued revoke for it. */
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
1129
/**
 * gfs2_alloc_di - Allocate a dinode
 * @dip: the directory that the inode is going in
 *
 * Allocates one free block as a dinode (GFS2_BLKST_DINODE) from the
 * reserved rgrp, updating the rgrp's free and dinode counts, statfs
 * accounting, and the revoke list.
 *
 * Returns: the block allocated
 */

uint64_t gfs2_alloc_di(struct gfs2_inode *dip)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_alloc *al = &dip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	uint32_t blk;
	uint64_t block;

	blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE);

	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_ri.ri_data0 + blk;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;
	rgd->rd_rg.rg_dinodes++;

	/* Journal the updated rgrp header (block 0 of the rgrp). */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, +1);
	/* Reusing a freed block as a dinode: drop any queued revoke. */
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
1170
/**
 * gfs2_free_data - free a contiguous run of data block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 * Marks the run free in the bitmaps, updates the rgrp header, and
 * adjusts statfs/quota accounting.
 */

void gfs2_free_data(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	/* rgblk_free already flagged the inconsistency if NULL. */
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	/* Journal the updated rgrp header (block 0 of the rgrp). */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(int64_t)blen,
			  ip->i_di.di_uid, ip->i_di.di_gid);
}
1199
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 * Like gfs2_free_data(), but additionally wipes the freed metadata
 * buffers from memory/journal via gfs2_meta_wipe().
 */

void gfs2_free_meta(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	/* rgblk_free already flagged the inconsistency if NULL. */
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	/* Journal the updated rgrp header (block 0 of the rgrp). */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(int64_t)blen,
			  ip->i_di.di_uid, ip->i_di.di_gid);
	gfs2_meta_wipe(ip, bstart, blen);
}
1229
/* Free a dinode block by number without touching quota or wiping its
   buffers (used for dinodes never fully initialized); @blkno must lie
   within @rgd. */
void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, uint64_t blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	/* The block must belong to the rgrp the caller handed us. */
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_rg.rg_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_rg.rg_dinodes--;
	rgd->rd_rg.rg_free++;

	/* Journal the updated rgrp header (block 0 of the rgrp). */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, +1, -1);
	gfs2_trans_add_rg(rgd);
}
1251
/**
 * gfs2_free_di - free a dinode block
 * @rgd: the resource group that contains the dinode
 * @ip: the inode
 *
 * Frees the inode's dinode block, charges the quota change, and wipes
 * the dinode's buffer.
 */

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
	gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid);
	gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
}
1265
/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @sdp: the filesystem
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 * (deduplicated).  The array grows in increments of 10.
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
		    uint64_t block)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	/* Once holders are allocated the list must not change. */
	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	rgd = gfs2_blk2rgrpd(sdp, block);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", block);
		return;
	}

	/* Already on the list? */
	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	/* Grow the array by 10 slots when full. */
	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_KERNEL | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}
1318
1319/**
1320 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
1321 * and initialize an array of glock holders for them
1322 * @rlist: the list of resource groups
1323 * @state: the lock state to acquire the RG lock in
1324 * @flags: the modifier flags for the holder structures
1325 *
1326 * FIXME: Don't use NOFAIL
1327 *
1328 */
1329
1330void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
1331 int flags)
1332{
1333 unsigned int x;
1334
1335 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
1336 GFP_KERNEL | __GFP_NOFAIL);
1337 for (x = 0; x < rlist->rl_rgrps; x++)
1338 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
1339 state, flags,
1340 &rlist->rl_ghs[x]);
1341}
1342
1343/**
1344 * gfs2_rlist_free - free a resource group list
1345 * @list: the list of resource groups
1346 *
1347 */
1348
1349void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
1350{
1351 unsigned int x;
1352
1353 kfree(rlist->rl_rgd);
1354
1355 if (rlist->rl_ghs) {
1356 for (x = 0; x < rlist->rl_rgrps; x++)
1357 gfs2_holder_uninit(&rlist->rl_ghs[x]);
1358 kfree(rlist->rl_ghs);
1359 }
1360}
1361