/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale" controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes and infinite bandwidth) to twice the
 * user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

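/**
 * gfs2_shrink_qd_memory - Shrinker callback for the quota data LRU list
 * @shrink: The shrinker (unused)
 * @nr: Number of entries to scan (0 means just report the count)
 * @gfp_mask: Allocation context of the caller
 *
 * Frees up to @nr unreferenced gfs2_quota_data structures from the global
 * reclaim list. Returns an estimate of the remaining reclaimable count,
 * scaled by vfs_cache_pressure, or -1 if called from a non-FS context.
 */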
int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}

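/**
 * qd2offset - Return the byte offset of an ID's record in the quota file
 * @qd: The quota data
 *
 * User and group records for the same ID occupy adjacent
 * sizeof(struct gfs2_quota) slots, user first.
 */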
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}

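/**
 * qd_get - Find or create the gfs2_quota_data for a given ID
 * @sdp: The GFS2 superblock
 * @user: Non-zero for a user ID, zero for a group ID
 * @id: The UID or GID
 * @qdp: Filled in with the quota data found or allocated
 *
 * Looks for a matching entry on the superblock's quota list, taking a
 * reference (and removing it from the reclaim list if necessary); if no
 * entry exists, one is allocated and added to the list.
 */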
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}

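/**
 * slot_get - Claim a slot in the per-node quota_change file for this ID
 * @qd: The quota data
 *
 * Scans sd_quota_bitmap for a free bit and records the slot number in
 * qd->qd_slot. Only the first hold actually allocates a slot; later calls
 * just bump qd_slot_count.
 *
 * Returns: 0 on success, -ENOSPC if no slot is free
 */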
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}

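/**
 * bh_get - Read in the quota_change block that holds this ID's slot
 * @qd: The quota data
 *
 * Maps and reads the block of the quota_change inode that contains
 * qd->qd_slot, and points qd->qd_bh_qc at the entry within it. The buffer
 * is held until the matching bh_put().
 */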
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

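/**
 * qd_fish - Find one quota data with a pending change that needs syncing
 * @sdp: The GFS2 superblock
 * @qdp: Filled in with the quota data found, or NULL if none
 *
 * Picks the first entry on the superblock's quota list that is dirty
 * (QDF_CHANGE), not already being synced, and not yet synced in the
 * current sync generation. The entry is marked QDF_LOCKED, referenced,
 * and its change value latched in qd_change_sync for the caller.
 */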
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

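/**
 * do_qc - Record a local quota change in the node's quota_change file
 * @qd: The quota data
 * @change: The number of blocks allocated (positive) or freed (negative)
 *
 * Adds @change to the on-disk quota_change entry for this ID. When the
 * accumulated change reaches zero, the slot and reference held for the
 * dirty state are dropped; when an entry first becomes dirty, an extra
 * hold on the qd and its slot is taken.
 */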
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh, *dibh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q, *qp;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip))
		gfs2_unstuff_dinode(ip, NULL);

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	qp = &q;
	qp->qu_value = be64_to_cpu(qp->qu_value);
	qp->qu_value += change;
	qp->qu_value = cpu_to_be64(qp->qu_value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = qp;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	/* Update the disk inode timestamp and size (if extended) */
	err = gfs2_meta_inode_buffer(ip, &dibh);
	if (err)
		goto out;

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size) {
		ip->i_disksize = size;
		i_size_write(inode, size);
	}
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(inode);

out:
	return err;
unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

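/**
 * do_sync - Sync a batch of quota changes into the main quota file
 * @num_qd: Number of entries in @qda
 * @qda: Array of quota data to sync
 *
 * Takes the quota glocks exclusively, reserves blocks and a transaction,
 * applies each entry's qd_change_sync to the quota file via
 * gfs2_adjust_quota(), and then subtracts the synced amount from the
 * local quota_change entry with do_qc().
 */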
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	al = gfs2_alloc_get(ip);
	if (!al) {
		error = -ENOMEM;
		goto out_gunlock;
	}
	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	al->al_requested = 1;
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	if (nalloc)
		al->al_requested += nalloc * (data_blocks + ind_blocks);
	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

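/**
 * update_qd - Refresh a quota data's LVB from the on-disk quota file
 * @sdp: The GFS2 superblock
 * @qd: The quota data
 *
 * Reads this ID's record from the quota file and copies the limit, warn
 * value and current value into the glock's LVB and into qd->qd_qb.
 */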
static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

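/**
 * do_glock - Acquire the glock for a quota data, refreshing it if stale
 * @qd: The quota data
 * @force_refresh: Re-read the quota file even if the LVB looks valid
 * @q_gh: The holder to fill in
 *
 * Normally takes the quota glock shared and trusts the cached LVB. If the
 * LVB has never been initialised (bad magic) or a refresh is forced, the
 * glock is retaken exclusively and update_qd() re-reads the quota file
 * before restarting with a shared hold.
 */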
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

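/**
 * gfs2_quota_lock - Acquire the quota glocks for an inode's quota IDs
 * @ip: The inode
 * @uid: A user ID being changed to, or NO_QUOTA_CHANGE
 * @gid: A group ID being changed to, or NO_QUOTA_CHANGE
 *
 * Holds the quota data for the inode's IDs (and the new uid/gid, if they
 * differ), then, unless the caller has CAP_SYS_RESOURCE or enforcement is
 * not enabled, takes each quota glock shared so the cached limits can be
 * checked.
 */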
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

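/**
 * need_sync - Decide whether a local quota change should be synced now
 * @qd: The quota data
 *
 * Returns 0 if there is no limit, the pending change is negative, the ID
 * is already at or over its limit, or the change scaled by the number of
 * journals and quota_scale would still leave it below the limit;
 * otherwise returns 1.
 */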
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

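/**
 * gfs2_quota_check - Check whether an allocation pushes an ID over quota
 * @ip: The inode being allocated for
 * @uid: The user ID to check
 * @gid: The group ID to check
 *
 * Compares the cached quota value plus the locally pending change against
 * the hard and soft limits, printing a message and sending a quota
 * netlink warning as appropriate.
 *
 * Returns: 0, or -EDQUOT if the hard limit is exceeded
 */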
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						gt_quota_warn_period) * HZ)) {
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}

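/**
 * gfs2_quota_sync - Sync all cached quota changes to the quota file
 * @sb: The VFS superblock
 * @type: Quota type (unused; both types are synced)
 * @wait: Unused
 *
 * Bumps the sync generation and repeatedly fishes out dirty quota data,
 * syncing them in batches of at most quota_simul_sync entries.
 */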
int gfs2_quota_sync(struct super_block *sb, int type, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
	return gfs2_quota_sync(sb, type, 0);
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

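/**
 * gfs2_quota_init - Read in the per-node quota_change file at mount time
 * @sdp: The GFS2 superblock
 *
 * Sizes and allocates the slot bitmap, then scans every quota_change
 * block for entries with a non-zero change, creating an in-core
 * gfs2_quota_data for each one so the pending change can later be synced.
 */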
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
	    ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

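/**
 * gfs2_quota_cleanup - Tear down all in-core quota data at unmount
 * @sdp: The GFS2 superblock
 *
 * Waits (via schedule()) for any still-referenced entries to be released,
 * removes every entry from the superblock and reclaim lists, frees them,
 * and finally frees the slot bitmap.
 */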
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while(1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}


/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		if (freezing(current))
			refrigerator();
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if (type == USRQUOTA)
		type = QUOTA_USER;
	else if (type == GRPQUOTA)
		type = QUOTA_GROUP;
	else
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
	fdq->d_id = id;
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value);

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)

static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	struct gfs2_alloc *al;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	switch(type) {
	case USRQUOTA:
		type = QUOTA_USER;
		if (fdq->d_flags != XFS_USER_QUOTA)
			return -EINVAL;
		break;
	case GRPQUOTA:
		type = QUOTA_GROUP;
		if (fdq->d_flags != XFS_GROUP_QUOTA)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;
	if (fdq->d_id != id)
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_put;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;
	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;
	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
					  &alloc_required);
	if (error)
		goto out_i;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (al == NULL)
			goto out_i;
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = al->al_requested = 1 + data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;
	}

	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_alloc:
		gfs2_alloc_put(ip);
	}
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_put:
	mutex_unlock(&ip->i_inode.i_mutex);
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_xstate	= gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};