]> bbs.cooldavid.org Git - net-next-2.6.git/blame - fs/gfs2/eattr.c
[GFS2] setup lock_dlm kobject earlier
[net-next-2.6.git] / fs / gfs2 / eattr.c
CommitLineData
b3b94faa
DT
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3a8a9a10 3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
b3b94faa
DT
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/xattr.h>
5c676f6d 16#include <linux/gfs2_ondisk.h>
b3b94faa
DT
17#include <asm/uaccess.h>
18
19#include "gfs2.h"
5c676f6d
SW
20#include "lm_interface.h"
21#include "incore.h"
b3b94faa
DT
22#include "acl.h"
23#include "eaops.h"
24#include "eattr.h"
25#include "glock.h"
26#include "inode.h"
27#include "meta_io.h"
28#include "quota.h"
29#include "rgrp.h"
30#include "trans.h"
5c676f6d 31#include "util.h"
b3b94faa
DT
32
33/**
34 * ea_calc_size - returns the acutal number of bytes the request will take up
35 * (not counting any unstuffed data blocks)
36 * @sdp:
37 * @er:
38 * @size:
39 *
40 * Returns: 1 if the EA should be stuffed
41 */
42
43static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
44 unsigned int *size)
45{
46 *size = GFS2_EAREQ_SIZE_STUFFED(er);
47 if (*size <= sdp->sd_jbsize)
48 return 1;
49
50 *size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);
51
52 return 0;
53}
54
55static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
56{
57 unsigned int size;
58
59 if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
60 return -ERANGE;
61
62 ea_calc_size(sdp, er, &size);
63
64 /* This can only happen with 512 byte blocks */
65 if (size > sdp->sd_jbsize)
66 return -ERANGE;
67
68 return 0;
69}
70
/*
 * Callback invoked by ea_foreach()/ea_foreach_i() for every EA header
 * found.  Returning non-zero stops the iteration: positive values are
 * used to signal "found / handled", negative values are errors.
 */
typedef int (*ea_call_t) (struct gfs2_inode *ip,
			  struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev,
			  void *private);
76
/*
 * ea_foreach_i - walk every EA header in a single EA block, invoking
 * @ea_call for each one.
 *
 * Each record is sanity-checked (non-zero length, lies inside the
 * buffer, valid type); corruption marks the inode inconsistent and
 * returns -EIO.  The record flagged LAST must end exactly at the end
 * of the block.
 *
 * Returns: 0 on success, the callback's non-zero value if it stopped
 * the walk, or -EIO on corruption.
 */
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		/* a zero record length would make this walk loop forever */
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		/* the whole record must lie within the buffer */
		if (!(bh->b_data <= (char *)ea &&
		      (char *)GFS2_EA2NEXT(ea) <=
		      bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			/* the final record must end exactly at the
			   end of the block */
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

 fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
114
/*
 * ea_foreach - iterate over every EA header attached to an inode.
 *
 * Handles both layouts: direct (di_eattr points at a single EA block)
 * and indirect (di_eattr points at a block of pointers to EA blocks,
 * flagged by GFS2_DIF_EA_INDIRECT).
 *
 * Returns: 0 when the walk completes, the callback's non-zero value if
 * it stopped early, or -errno.
 */
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	uint64_t *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
			       DIO_START | DIO_WAIT, &bh);
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	/* indirect: bh holds big-endian block pointers after the meta
	   header; a zero entry terminates the list */
	eablk = (uint64_t *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + ip->i_sbd->sd_inptrs;

	for (; eablk < end; eablk++) {
		uint64_t bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_START | DIO_WAIT,
				       &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
 out:
	brelse(bh);

	return error;
}
160
/* Context passed through ea_foreach() by gfs2_ea_find(). */
struct ea_find {
	struct gfs2_ea_request *ef_er;	/* what we are looking for */
	struct gfs2_ea_location *ef_el;	/* filled in when found */
};
165
166static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
167 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
168 void *private)
169{
170 struct ea_find *ef = private;
171 struct gfs2_ea_request *er = ef->ef_er;
172
173 if (ea->ea_type == GFS2_EATYPE_UNUSED)
174 return 0;
175
176 if (ea->ea_type == er->er_type) {
177 if (ea->ea_name_len == er->er_name_len &&
178 !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
179 struct gfs2_ea_location *el = ef->ef_el;
180 get_bh(bh);
181 el->el_bh = bh;
182 el->el_ea = ea;
183 el->el_prev = prev;
184 return 1;
185 }
186 }
187
188#if 0
189 else if ((ip->i_di.di_flags & GFS2_DIF_EA_PACKED) &&
190 er->er_type == GFS2_EATYPE_SYS)
191 return 1;
192#endif
193
194 return 0;
195}
196
197int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
198 struct gfs2_ea_location *el)
199{
200 struct ea_find ef;
201 int error;
202
203 ef.ef_er = er;
204 ef.ef_el = el;
205
206 memset(el, 0, sizeof(struct gfs2_ea_location));
207
208 error = ea_foreach(ip, ea_find_i, &ef);
209 if (error > 0)
210 return 0;
211
212 return error;
213}
214
/**
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
 * @ip: the inode
 * @bh: the buffer holding the EA header
 * @ea: the EA whose data blocks are being freed
 * @prev: the previous EA header in the block (may be NULL)
 * @private: if non-NULL, the header is kept in place (marked unused)
 *           instead of being merged into @prev
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG. But watch, this may not always
 * be true.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	uint64_t *dataptrs, bn = 0;
	uint64_t bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	/* First pass: count the data blocks, remembering one block
	   number so the resource group can be looked up below. */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++)
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_ri.ri_length +
				 RES_DINODE + RES_EATTR + RES_STATFS +
				 RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	/* Second pass: free the blocks, coalescing runs of contiguous
	   block numbers into single gfs2_free_meta() calls. */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	/* Either merge the now-dead record into the previous one, or
	   (when there is no prev, or "leave" was requested) just mark
	   the header unused. */
	if (prev && !leave) {
		uint32_t len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_di.di_ctime = get_seconds();
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

 out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);

	return error;
}
327
328static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
329 struct gfs2_ea_header *ea,
330 struct gfs2_ea_header *prev, int leave)
331{
332 struct gfs2_alloc *al;
333 int error;
334
335 al = gfs2_alloc_get(ip);
336
337 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
338 if (error)
339 goto out_alloc;
340
341 error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
342 if (error)
343 goto out_quota;
344
345 error = ea_dealloc_unstuffed(ip,
346 bh, ea, prev,
347 (leave) ? &error : NULL);
348
349 gfs2_glock_dq_uninit(&al->al_ri_gh);
350
351 out_quota:
352 gfs2_quota_unhold(ip);
353
354 out_alloc:
355 gfs2_alloc_put(ip);
356
357 return error;
358}
359
b3b94faa
DT
/* Context passed through ea_foreach() by gfs2_ea_list(). */
struct ea_list {
	struct gfs2_ea_request *ei_er;	/* destination buffer description */
	unsigned int ei_size;		/* bytes accumulated so far */
};
364
/*
 * ea_list_i - append one EA's prefixed name ("user.foo\0" or
 * "system.foo\0") to the listing buffer.  When the request carries no
 * buffer (er_data_len == 0) only the required size is accumulated.
 *
 * Returns: 0, or -ERANGE if the caller's buffer is too small.
 */
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = GFS2_EA_STRLEN(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix;
		unsigned int l;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		/* only two namespaces exist here: user and system */
		if (ea->ea_type == GFS2_EATYPE_USR) {
			prefix = "user.";
			l = 5;
		} else {
			prefix = "system.";
			l = 7;
		}

		/* layout: prefix + name + trailing NUL */
		memcpy(er->er_data + ei->ei_size,
		       prefix, l);
		memcpy(er->er_data + ei->ei_size + l,
		       GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size +
		       ea_size - 1,
		       &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}
406
407/**
408 * gfs2_ea_list -
409 * @ip:
410 * @er:
411 *
412 * Returns: actual size of data on success, -errno on error
413 */
414
415int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
416{
417 struct gfs2_holder i_gh;
418 int error;
419
420 if (!er->er_data || !er->er_data_len) {
421 er->er_data = NULL;
422 er->er_data_len = 0;
423 }
424
425 error = gfs2_glock_nq_init(ip->i_gl,
426 LM_ST_SHARED, LM_FLAG_ANY,
427 &i_gh);
428 if (error)
429 return error;
430
431 if (ip->i_di.di_eattr) {
432 struct ea_list ei = { .ei_er = er, .ei_size = 0 };
433
434 error = ea_foreach(ip, ea_list_i, &ei);
435 if (!error)
436 error = ei.ei_size;
437 }
438
439 gfs2_glock_dq_uninit(&i_gh);
440
441 return error;
442}
443
/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 * request buffer
 * @ip: the inode
 * @ea: the EA header whose data pointers are read
 * @data: destination buffer (must hold GFS2_EA_DATA_LEN(ea) bytes)
 *
 * Reads are started on all data blocks first, then each is waited on
 * and copied out in turn.
 *
 * Returns: errno
 */

static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			    char *data)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	/* kick off all reads asynchronously... */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
				       DIO_START, bh + x);
		if (error) {
			/* drop the buffers already started */
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	/* ...then wait on each and copy out one journal-block's worth */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
		if (error) {
			/* release this and all remaining buffers */
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		/* the last block may be only partially used */
		memcpy(data,
		       bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

 out:
	kfree(bh);

	return error;
}
509
510int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
511 char *data)
512{
513 if (GFS2_EA_IS_STUFFED(el->el_ea)) {
514 memcpy(data,
515 GFS2_EA2DATA(el->el_ea),
516 GFS2_EA_DATA_LEN(el->el_ea));
517 return 0;
518 } else
519 return ea_get_unstuffed(ip, el->el_ea, data);
520}
521
522/**
523 * gfs2_ea_get_i -
524 * @ip:
525 * @er:
526 *
527 * Returns: actual size of data on success, -errno on error
528 */
529
530int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
531{
532 struct gfs2_ea_location el;
533 int error;
534
535 if (!ip->i_di.di_eattr)
536 return -ENODATA;
537
538 error = gfs2_ea_find(ip, er, &el);
539 if (error)
540 return error;
541 if (!el.el_ea)
542 return -ENODATA;
543
544 if (er->er_data_len) {
545 if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
546 error = -ERANGE;
547 else
548 error = gfs2_ea_get_copy(ip, &el, er->er_data);
549 }
550 if (!error)
551 error = GFS2_EA_DATA_LEN(el.el_ea);
552
553 brelse(el.el_bh);
554
555 return error;
556}
557
558/**
559 * gfs2_ea_get -
560 * @ip:
561 * @er:
562 *
563 * Returns: actual size of data on success, -errno on error
564 */
565
566int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
567{
568 struct gfs2_holder i_gh;
569 int error;
570
571 if (!er->er_name_len ||
572 er->er_name_len > GFS2_EA_MAX_NAME_LEN)
573 return -EINVAL;
574 if (!er->er_data || !er->er_data_len) {
575 er->er_data = NULL;
576 er->er_data_len = 0;
577 }
578
579 error = gfs2_glock_nq_init(ip->i_gl,
580 LM_ST_SHARED, LM_FLAG_ANY,
581 &i_gh);
582 if (error)
583 return error;
584
585 error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);
586
587 gfs2_glock_dq_uninit(&i_gh);
588
589 return error;
590}
591
/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: set to the new block's (held) buffer head; caller must brelse
 *
 * The block is initialised with a single unused EA record spanning the
 * whole journaled area, and the inode's block count is bumped.
 *
 * Returns: errno (currently always 0)
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_ea_header *ea;
	uint64_t block;

	block = gfs2_alloc_meta(ip);

	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	/* one empty record covering the whole block */
	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	ip->i_di.di_blocks++;

	return 0;
}
623
/**
 * ea_write - writes the request info to an ea, creating new blocks if
 * necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of
 * ea_flags
 *
 * Returns: errno (currently always 0)
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = ip->i_sbd;

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		/* stuffed: the data fits right after the name */
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		/* unstuffed: allocate data blocks and store big-endian
		   pointers to them after the name */
		uint64_t *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			uint64_t block;
			int mh_size = sizeof(struct gfs2_meta_header);

			block = gfs2_alloc_meta(ip);

			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			ip->i_di.di_blocks++;

			copy = (data_len > sdp->sd_jbsize) ? sdp->sd_jbsize :
				data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			/* zero-fill the tail of a partial last block */
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64((uint64_t)bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		/* all of the data must have been consumed */
		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}
691
/*
 * Callback run by ea_alloc_skeleton() inside the transaction, between
 * the block reservation and the dinode update.
 */
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er,
				   void *private);
695
/*
 * ea_alloc_skeleton - common scaffolding for EA writes that need new
 * blocks: quota lock/check, in-place block reservation, transaction,
 * then the supplied @skeleton_call, then the dinode update.
 * @blks: number of new blocks the callback may allocate
 *
 * Returns: errno
 */
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call,
			     void *private)
{
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
	if (error)
		goto out_gunlock_q;

	al->al_requested = blks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(ip->i_sbd,
				 blks + al->al_rgd->rd_ri.ri_length +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		if (er->er_flags & GFS2_ERF_MODE) {
			/* mode changes ride along with ACL updates; the
			   file type bits must never change */
			gfs2_assert_withdraw(ip->i_sbd,
					     (ip->i_di.di_mode & S_IFMT) ==
					     (er->er_mode & S_IFMT));
			ip->i_di.di_mode = er->er_mode;
		}
		ip->i_di.di_ctime = get_seconds();
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

 out_end_trans:
	gfs2_trans_end(ip->i_sbd);

 out_ipres:
	gfs2_inplace_release(ip);

 out_gunlock_q:
	gfs2_quota_unlock(ip);

 out:
	gfs2_alloc_put(ip);

	return error;
}
759
760static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
761 void *private)
762{
763 struct buffer_head *bh;
764 int error;
765
766 error = ea_alloc_blk(ip, &bh);
767 if (error)
768 return error;
769
770 ip->i_di.di_eattr = bh->b_blocknr;
771 error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
772
773 brelse(bh);
774
775 return error;
776}
777
778/**
779 * ea_init - initializes a new eattr block
780 * @ip:
781 * @er:
782 *
783 * Returns: errno
784 */
785
786static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
787{
788 unsigned int jbsize = ip->i_sbd->sd_jbsize;
789 unsigned int blks = 1;
790
791 if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
5c676f6d 792 blks += DIV_ROUND_UP(er->er_data_len, jbsize);
b3b94faa
DT
793
794 return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
795}
796
/*
 * ea_split_ea - split an EA record in two, returning the new (second)
 * record.  The first record keeps the EA's actual size; the second
 * record gets the remainder of the original record length and, if it
 * was set, inherits the LAST flag.
 */
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	uint32_t ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
							       ea_size);
	uint32_t new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;	/* clear LAST on the first record, if set */

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}
813
/*
 * ea_set_remove_stuffed - retire the old copy of an EA (located by
 * @el) after its replacement has been written elsewhere.  Merges the
 * record into the previous one when possible, otherwise marks it
 * unused.
 */
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	uint32_t len;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		/* the record may have been split since the lookup;
		   re-sync prev to the record directly before ea */
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(ip->i_sbd, GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}
837
/* State shared between ea_set_simple() and its helper callbacks. */
struct ea_set {
	int ea_split;			/* chosen record must be split first */

	struct gfs2_ea_request *es_er;	/* the set request */
	struct gfs2_ea_location *es_el;	/* old copy to remove, or NULL */

	struct buffer_head *es_bh;	/* block holding es_ea */
	struct gfs2_ea_header *es_ea;	/* record chosen to hold the new EA */
};
847
/*
 * ea_set_simple_noalloc - write a stuffed EA into an existing record
 * (no new blocks needed), then update the dinode.
 *
 * Returns: errno
 */
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	/* retire the old copy of this EA, if one was found earlier */
	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (er->er_flags & GFS2_ERF_MODE) {
		/* mode changes ride along with ACL updates */
		gfs2_assert_withdraw(ip->i_sbd,
			(ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
		ip->i_di.di_mode = er->er_mode;
	}
	ip->i_di.di_ctime = get_seconds();
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);
 out:
	gfs2_trans_end(ip->i_sbd);

	return error;
}
887
888static int ea_set_simple_alloc(struct gfs2_inode *ip,
889 struct gfs2_ea_request *er, void *private)
890{
891 struct ea_set *es = private;
892 struct gfs2_ea_header *ea = es->es_ea;
893 int error;
894
d4e9c4c3 895 gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);
b3b94faa
DT
896
897 if (es->ea_split)
898 ea = ea_split_ea(ea);
899
900 error = ea_write(ip, ea, er);
901 if (error)
902 return error;
903
904 if (es->es_el)
905 ea_set_remove_stuffed(ip, es->es_el);
906
907 return 0;
908}
909
/*
 * ea_set_simple - ea_foreach callback that looks for room to store a
 * new EA within the existing EA blocks: either an unused record that
 * is big enough, or a used record with enough slack to be split.
 *
 * Returns: 1 when the EA was written (stops the walk), 0 to keep
 * looking, -errno on error.
 */
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(ip->i_sbd, es->es_er, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			/* reclaim the dead EA's data blocks first,
			   leaving the header in place for reuse */
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		/* budget for the unstuffed data blocks */
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					ip->i_sbd->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
955
/*
 * ea_set_block - store an EA in a brand-new EA block, converting the
 * inode to the indirect EA layout on the way if necessary.
 *
 * @private, if set, is the location of an old copy of this EA that
 * should be retired once the new copy is written.
 *
 * Returns: errno
 */
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct buffer_head *indbh, *newbh;
	uint64_t *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		/* already indirect: find a free slot in the pointer block */
		uint64_t *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
				       DIO_START | DIO_WAIT, &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (uint64_t *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
	} else {
		/* create the indirect block; its first pointer is the
		   old direct EA block */
		uint64_t blk;

		blk = gfs2_alloc_meta(ip);

		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (uint64_t *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_di.di_eattr);
		ip->i_di.di_eattr = blk;
		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
		ip->i_di.di_blocks++;

		eablk++;	/* the new EA block goes in the next slot */
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((uint64_t)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, (struct gfs2_ea_location *)private);

 out:
	brelse(indbh);

	return error;
}
1028
/*
 * ea_set_i - write an EA, first trying to reuse space in the existing
 * EA blocks and falling back to allocating a new EA block.
 * @el, if non-NULL, locates the old copy to retire afterwards.
 *
 * Returns: errno
 */
static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		    struct gfs2_ea_location *el)
{
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = er;
	es.es_el = el;

	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;	/* ea_set_simple found room and wrote it */
	if (error)
		return error;

	/* no room in existing blocks: budget an extra block if we must
	   go indirect, plus data blocks if the EA will be unstuffed */
	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(er) > ip->i_sbd->sd_jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, ip->i_sbd->sd_jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}
1053
1054static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
1055 struct gfs2_ea_location *el)
1056{
1057 if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
1058 el->el_prev = GFS2_EA2NEXT(el->el_prev);
1059 gfs2_assert_withdraw(ip->i_sbd,
1060 GFS2_EA2NEXT(el->el_prev) == el->el_ea);
1061 }
1062
1063 return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev,0);
1064}
1065
/*
 * gfs2_ea_set_i - set/create/replace an EA, honouring the
 * XATTR_CREATE / XATTR_REPLACE flags.
 *
 * Returns: errno
 */
int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr) {
		if (er->er_flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, er);
	}

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(er->er_flags & XATTR_CREATE)) {
			/* replacing: if the old copy was unstuffed its
			   data blocks must be freed after the new copy
			   has been written */
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, er, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
	} else {
		error = -ENODATA;
		if (!(er->er_flags & XATTR_REPLACE))
			error = ea_set_i(ip, er, NULL);
	}

	return error;
}
1104
1105int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1106{
1107 struct gfs2_holder i_gh;
1108 int error;
1109
1110 if (!er->er_name_len ||
1111 er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1112 return -EINVAL;
1113 if (!er->er_data || !er->er_data_len) {
1114 er->er_data = NULL;
1115 er->er_data_len = 0;
1116 }
1117 error = ea_check_size(ip->i_sbd, er);
1118 if (error)
1119 return error;
1120
1121 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1122 if (error)
1123 return error;
1124
1125 if (IS_IMMUTABLE(ip->i_vnode))
1126 error = -EPERM;
1127 else
1128 error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);
1129
1130 gfs2_glock_dq_uninit(&i_gh);
1131
1132 return error;
1133}
1134
/*
 * ea_remove_stuffed - delete a stuffed EA by merging its record into
 * the previous one (or marking it unused when it is the first record),
 * then update the dinode.
 *
 * Returns: errno
 */
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (prev) {
		uint32_t len;

		/* absorb this record into the previous one */
		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else
		ea->ea_type = GFS2_EATYPE_UNUSED;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_di.di_ctime = get_seconds();
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(ip->i_sbd);

	return error;
}
1171
1172int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1173{
1174 struct gfs2_ea_location el;
1175 int error;
1176
1177 if (!ip->i_di.di_eattr)
1178 return -ENODATA;
1179
1180 error = gfs2_ea_find(ip, er, &el);
1181 if (error)
1182 return error;
1183 if (!el.el_ea)
1184 return -ENODATA;
1185
1186 if (GFS2_EA_IS_STUFFED(el.el_ea))
1187 error = ea_remove_stuffed(ip, &el);
1188 else
1189 error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
1190 0);
1191
1192 brelse(el.el_bh);
1193
1194 return error;
1195}
1196
1197/**
1198 * gfs2_ea_remove - sets (or creates or replaces) an extended attribute
1199 * @ip: pointer to the inode of the target file
1200 * @er: request information
1201 *
1202 * Returns: errno
1203 */
1204
1205int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1206{
1207 struct gfs2_holder i_gh;
1208 int error;
1209
1210 if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1211 return -EINVAL;
1212
1213 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1214 if (error)
1215 return error;
1216
1217 if (IS_IMMUTABLE(ip->i_vnode) || IS_APPEND(ip->i_vnode))
1218 error = -EPERM;
1219 else
1220 error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);
1221
1222 gfs2_glock_dq_uninit(&i_gh);
1223
1224 return error;
1225}
1226
/*
 * ea_acl_chmod_unstuffed - overwrite the data blocks of an unstuffed
 * ACL EA with @data, inside a transaction.
 *
 * On success the transaction is left open for the caller
 * (gfs2_ea_acl_chmod) to finish; on failure it is ended here.
 *
 * Returns: errno
 */
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_header *ea, char *data)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
	if (error)
		goto out;

	/* start reads on all data blocks first... */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
				       DIO_START, bh + x);
		if (error) {
			/* drop the buffers already started */
			while (x--)
				brelse(bh[x]);
			goto fail;
		}
		dataptrs++;
	}

	/* ...then wait on each, check it, and overwrite its payload */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto fail;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto fail;
		}

		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

		/* the last block may be only partially used */
		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       data,
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

	/* success (or trans_begin failure): transaction, if started,
	   stays open for the caller to end */
 out:
	kfree(bh);

	return error;

	/* I/O failure after trans_begin: end the transaction here */
 fail:
	gfs2_trans_end(sdp);
	kfree(bh);

	return error;
}
1294
/*
 * gfs2_ea_acl_chmod - rewrite an ACL EA's data in place (after a
 * chmod) and apply the attribute change to the inode.
 *
 * Returns: errno
 */
int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		      struct iattr *attr, char *data)
{
	struct buffer_head *dibh;
	int error;

	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
		if (error)
			return error;

		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
		memcpy(GFS2_EA2DATA(el->el_ea),
		       data,
		       GFS2_EA_DATA_LEN(el->el_ea));
	} else
		/* starts its own transaction; on failure that
		   transaction has already been ended */
		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(ip->i_vnode, attr);
		gfs2_assert_warn(ip->i_sbd, !error);
		gfs2_inode_attr_out(ip);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(ip->i_sbd);

	return error;
}
1330
1331static int ea_dealloc_indirect(struct gfs2_inode *ip)
1332{
1333 struct gfs2_sbd *sdp = ip->i_sbd;
1334 struct gfs2_rgrp_list rlist;
1335 struct buffer_head *indbh, *dibh;
1336 uint64_t *eablk, *end;
1337 unsigned int rg_blocks = 0;
1338 uint64_t bstart = 0;
1339 unsigned int blen = 0;
1340 unsigned int blks = 0;
1341 unsigned int x;
1342 int error;
1343
1344 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1345
1346 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
1347 DIO_START | DIO_WAIT, &indbh);
1348 if (error)
1349 return error;
1350
1351 if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
1352 error = -EIO;
1353 goto out;
1354 }
1355
1356 eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1357 end = eablk + sdp->sd_inptrs;
1358
1359 for (; eablk < end; eablk++) {
1360 uint64_t bn;
1361
1362 if (!*eablk)
1363 break;
1364 bn = be64_to_cpu(*eablk);
1365
1366 if (bstart + blen == bn)
1367 blen++;
1368 else {
1369 if (bstart)
1370 gfs2_rlist_add(sdp, &rlist, bstart);
1371 bstart = bn;
1372 blen = 1;
1373 }
1374 blks++;
1375 }
1376 if (bstart)
1377 gfs2_rlist_add(sdp, &rlist, bstart);
1378 else
1379 goto out;
1380
1381 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
1382
1383 for (x = 0; x < rlist.rl_rgrps; x++) {
1384 struct gfs2_rgrpd *rgd;
5c676f6d 1385 rgd = rlist.rl_ghs[x].gh_gl->gl_object;
b3b94faa
DT
1386 rg_blocks += rgd->rd_ri.ri_length;
1387 }
1388
1389 error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
1390 if (error)
1391 goto out_rlist_free;
1392
1393 error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
1394 RES_INDIRECT + RES_STATFS +
1395 RES_QUOTA, blks);
1396 if (error)
1397 goto out_gunlock;
1398
d4e9c4c3 1399 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
b3b94faa
DT
1400
1401 eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1402 bstart = 0;
1403 blen = 0;
1404
1405 for (; eablk < end; eablk++) {
1406 uint64_t bn;
1407
1408 if (!*eablk)
1409 break;
1410 bn = be64_to_cpu(*eablk);
1411
1412 if (bstart + blen == bn)
1413 blen++;
1414 else {
1415 if (bstart)
1416 gfs2_free_meta(ip, bstart, blen);
1417 bstart = bn;
1418 blen = 1;
1419 }
1420
1421 *eablk = 0;
1422 if (!ip->i_di.di_blocks)
1423 gfs2_consist_inode(ip);
1424 ip->i_di.di_blocks--;
1425 }
1426 if (bstart)
1427 gfs2_free_meta(ip, bstart, blen);
1428
1429 ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;
1430
1431 error = gfs2_meta_inode_buffer(ip, &dibh);
1432 if (!error) {
d4e9c4c3 1433 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
b3b94faa
DT
1434 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1435 brelse(dibh);
1436 }
1437
1438 gfs2_trans_end(sdp);
1439
1440 out_gunlock:
1441 gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
1442
1443 out_rlist_free:
1444 gfs2_rlist_free(&rlist);
1445
1446 out:
1447 brelse(indbh);
1448
1449 return error;
1450}
1451
1452static int ea_dealloc_block(struct gfs2_inode *ip)
1453{
1454 struct gfs2_sbd *sdp = ip->i_sbd;
1455 struct gfs2_alloc *al = &ip->i_alloc;
1456 struct gfs2_rgrpd *rgd;
1457 struct buffer_head *dibh;
1458 int error;
1459
1460 rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
1461 if (!rgd) {
1462 gfs2_consist_inode(ip);
1463 return -EIO;
1464 }
1465
1466 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
1467 &al->al_rgd_gh);
1468 if (error)
1469 return error;
1470
1471 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE +
1472 RES_STATFS + RES_QUOTA, 1);
1473 if (error)
1474 goto out_gunlock;
1475
1476 gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
1477
1478 ip->i_di.di_eattr = 0;
1479 if (!ip->i_di.di_blocks)
1480 gfs2_consist_inode(ip);
1481 ip->i_di.di_blocks--;
1482
1483 error = gfs2_meta_inode_buffer(ip, &dibh);
1484 if (!error) {
d4e9c4c3 1485 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
b3b94faa
DT
1486 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1487 brelse(dibh);
1488 }
1489
1490 gfs2_trans_end(sdp);
1491
1492 out_gunlock:
1493 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1494
1495 return error;
1496}
1497
1498/**
1499 * gfs2_ea_dealloc - deallocate the extended attribute fork
1500 * @ip: the inode
1501 *
1502 * Returns: errno
1503 */
1504
1505int gfs2_ea_dealloc(struct gfs2_inode *ip)
1506{
1507 struct gfs2_alloc *al;
1508 int error;
1509
1510 al = gfs2_alloc_get(ip);
1511
1512 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1513 if (error)
1514 goto out_alloc;
1515
1516 error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
1517 if (error)
1518 goto out_quota;
1519
1520 error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1521 if (error)
1522 goto out_rindex;
1523
1524 if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
1525 error = ea_dealloc_indirect(ip);
1526 if (error)
1527 goto out_rindex;
1528 }
1529
1530 error = ea_dealloc_block(ip);
1531
1532 out_rindex:
1533 gfs2_glock_dq_uninit(&al->al_ri_gh);
1534
1535 out_quota:
1536 gfs2_quota_unhold(ip);
1537
1538 out_alloc:
1539 gfs2_alloc_put(ip);
1540
1541 return error;
1542}
1543