 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run after node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/tty.h>
#include <linux/sort.h>
#include <asm/semaphore.h>
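
/*
 * Illustrative sketch of the "quota_scale" behaviour described in the
 * header comment above.  This helper is not part of GFS2; the names and
 * the plain division (the real code, need_sync() below, uses do_div())
 * are assumptions made for the example only.  A positive change pending
 * on one node is scaled by the number of nodes (journals) and by
 * quota_scale_num/quota_scale_den before being compared to the limit.
 */
#if 0	/* example only, not compiled */
static int quota_sync_needed(int64_t local_change, int64_t synced_value,
			     uint64_t limit, unsigned int journals,
			     unsigned int scale_num, unsigned int scale_den)
{
	/* Assumes local_change > 0; negative changes never force a sync. */
	int64_t value = local_change * journals * scale_num;

	value /= scale_den;

	/* With scale = 1, a node trips a sync once its pending change,
	   multiplied by the node count, would reach the limit. */
	return synced_value + value >= (int64_t)limit;
}
#endif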
static uint64_t qd2offset(struct gfs2_quota_data *qd)
	offset = 2 * (uint64_t)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);
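
/*
 * The master quota file thus interleaves user and group quotas by ID:
 * the user quota for ID n sits at slot 2n and the group quota for ID n
 * at slot 2n + 1, each slot being sizeof(struct gfs2_quota) bytes.  For
 * example, the group quota for ID 3 starts at byte offset
 * 7 * sizeof(struct gfs2_quota).
 */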
static int qd_alloc(struct gfs2_sbd *sdp, int user, uint32_t id,
		    struct gfs2_quota_data **qdp)
	struct gfs2_quota_data *qd;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
		set_bit(QDF_USER, &qd->qd_flags);

	error = gfs2_glock_get(sdp, 2 * (uint64_t)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);

static int qd_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
		  struct gfs2_quota_data **qdp)
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (qd->qd_id == id &&
		    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
		list_add(&qd->qd_list, &sdp->sd_quota_list);
		atomic_inc(&sdp->sd_quota_count);
	spin_unlock(&sdp->sd_quota_spin);
		gfs2_lvb_unhold(new_qd->qd_gl);
	error = qd_alloc(sdp, user, id, &new_qd);

static void qd_hold(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	spin_unlock(&sdp->sd_quota_spin);

static void qd_put(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);

static int slot_get(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);
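
	/*
	 * First reference: scan the per-chunk bitmaps for a free slot in
	 * the per-node quota-change file.  Each chunk is one page, so one
	 * chunk tracks 8 * PAGE_SIZE slots.
	 */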
	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
	if (qd->qd_slot >= sdp->sd_quota_slots)
	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);

	spin_unlock(&sdp->sd_quota_spin);

static void slot_hold(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	spin_unlock(&sdp->sd_quota_spin);

static void slot_put(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
	spin_unlock(&sdp->sd_quota_spin);

static int bh_get(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = sdp->sd_qc_inode;
	unsigned int block, offset;
	struct buffer_head *bh;

	down(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		up(&sdp->sd_quota_mutex);
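
	/* First reference: map the slot number to a (block, offset)
	   pair within the per-node quota-change file. */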
	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;
	error = gfs2_block_map(ip, block, &new, &dblock, NULL);

	error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))

	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	up(&sdp->sd_quota_mutex);

	up(&sdp->sd_quota_mutex);

static void bh_put(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	down(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
	up(&sdp->sd_quota_mutex);
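
/*
 * qd_fish: find the next quota on sd_quota_list with unsynced changes,
 * mark it QDF_LOCKED, snapshot its pending change into qd_change_sync,
 * and hand it to the caller (gfs2_quota_sync) for syncing.
 */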
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
	struct gfs2_quota_data *qd = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);

	spin_unlock(&sdp->sd_quota_spin);

		gfs2_assert_warn(sdp, qd->qd_change_sync);
			clear_bit(QDF_LOCKED, &qd->qd_flags);

static int qd_trylock(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
		clear_bit(QDF_LOCKED, &qd->qd_flags);

static void qd_unlock(struct gfs2_quota_data *qd)
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);

static int qdsb_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
		    struct gfs2_quota_data **qdp)
	error = qd_get(sdp, user, id, create, qdp);

	error = slot_get(*qdp);

	error = bh_get(*qdp);

static void qdsb_put(struct gfs2_quota_data *qd)

int gfs2_quota_hold(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)

	error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);

	gfs2_quota_unhold(ip);

void gfs2_quota_unhold(struct gfs2_inode *ip)
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
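
/*
 * sort_qd: comparator used to impose a stable ordering before multiple
 * quota glocks are acquired: user quotas sort before group quotas, and
 * within each class IDs sort in ascending order.
 */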
static int sort_qd(const void *a, const void *b)
	struct gfs2_quota_data *qd_a = *(struct gfs2_quota_data **)a;
	struct gfs2_quota_data *qd_b = *(struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))

	if (qd_a->qd_id < qd_b->qd_id)
	else if (qd_a->qd_id > qd_b->qd_id)

static void do_qc(struct gfs2_quota_data *qd, int64_t change)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = sdp->sd_qc_inode;
	struct gfs2_quota_change *qc = qd->qd_bh_qc;

	down(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);

	x = be64_to_cpu(x) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	spin_unlock(&sdp->sd_quota_spin);

		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {

	up(&sdp->sd_quota_mutex);
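
/*
 * do_sync: fold the accumulated per-node changes for a batch of quotas
 * into the master quota file.  All the per-ID glocks are acquired in
 * sorted order, then each value is read, adjusted, and written back
 * within a single transaction.
 */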
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = sdp->sd_quota_inode;
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	struct gfs2_quota_data *qd;
	unsigned int nalloc = 0;
	struct gfs2_alloc *al = NULL;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
					   GL_NOCACHE, &ghs[qx]);

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),

	al = gfs2_alloc_get(ip);

	al->al_requested = nalloc * (data_blocks + ind_blocks);

	error = gfs2_inplace_reserve(ip);

	error = gfs2_trans_begin(sdp,
				 al->al_rgd->rd_ri.ri_length +
				 num_qd * data_blocks +
				 nalloc * ind_blocks +
				 RES_DINODE + num_qd +

	error = gfs2_trans_begin(sdp,
				 num_qd * data_blocks +
				 RES_DINODE + num_qd, 0);

	for (x = 0; x < num_qd; x++) {
		char buf[sizeof(struct gfs2_quota)];

		offset = qd2offset(qd);

		/* The quota file may not be a multiple of
		   sizeof(struct gfs2_quota) bytes. */
		memset(buf, 0, sizeof(struct gfs2_quota));

		error = gfs2_jdata_read_mem(ip, buf, offset,
					    sizeof(struct gfs2_quota));

		gfs2_quota_in(&q, buf);
		q.qu_value += qda[x]->qd_change_sync;
		gfs2_quota_out(&q, buf);
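
		/* buf now holds the on-disk (big-endian) image of the
		   updated quota; gfs2_quota_in()/gfs2_quota_out() do the
		   endianness conversion in each direction. */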
		error = gfs2_jdata_write_mem(ip, buf, offset,
					     sizeof(struct gfs2_quota));
		else if (error != sizeof(struct gfs2_quota)) {

		do_qc(qd, -qd->qd_change_sync);

		memset(&qd->qd_qb, 0, sizeof(struct gfs2_quota_lvb));
		qd->qd_qb.qb_magic = GFS2_MAGIC;
		qd->qd_qb.qb_limit = q.qu_limit;
		qd->qd_qb.qb_warn = q.qu_warn;
		qd->qd_qb.qb_value = q.qu_value;

		gfs2_quota_lvb_out(&qd->qd_qb, qd->qd_gl->gl_lvb);

	gfs2_inplace_release(ip);

	gfs2_glock_dq_uninit(&i_gh);

		gfs2_glock_dq_uninit(&ghs[qx]);

	gfs2_log_flush_glock(ip->i_gl);

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_holder i_gh;
	char buf[sizeof(struct gfs2_quota)];

	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);

	gfs2_quota_lvb_in(&qd->qd_qb, qd->qd_gl->gl_lvb);

	if (force_refresh || qd->qd_qb.qb_magic != GFS2_MAGIC) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,

		error = gfs2_glock_nq_init(sdp->sd_quota_inode->i_gl,

		memset(buf, 0, sizeof(struct gfs2_quota));

		error = gfs2_jdata_read_mem(sdp->sd_quota_inode, buf,
					    sizeof(struct gfs2_quota));

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);

		memset(&qd->qd_qb, 0, sizeof(struct gfs2_quota_lvb));
		qd->qd_qb.qb_magic = GFS2_MAGIC;
		qd->qd_qb.qb_limit = q.qu_limit;
		qd->qd_qb.qb_warn = q.qu_warn;
		qd->qd_qb.qb_value = q.qu_value;

		gfs2_quota_lvb_out(&qd->qd_qb, qd->qd_gl->gl_lvb);

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);

	gfs2_glock_dq_uninit(&i_gh);

	gfs2_glock_dq_uninit(q_gh);

int gfs2_quota_lock(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);

	set_bit(GIF_QD_LOCKED, &ip->i_flags);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
	gfs2_quota_unhold(ip);

static int need_sync(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int num, den;

	if (!qd->qd_qb.qb_limit)

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);
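
	/* Snapshot the quota_scale fraction, then decide whether the
	   local change, scaled by journal (node) count and quota_scale,
	   would push the synced value over the limit. */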
	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	else if (qd->qd_qb.qb_value >= (int64_t)qd->qd_qb.qb_limit)

		value *= gfs2_jindex_size(sdp) * num;
		value += qd->qd_qb.qb_value;
		if (value < (int64_t)qd->qd_qb.qb_limit)

void gfs2_quota_unlock(struct gfs2_inode *ip)
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;

		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))

	for (x = 0; x < count; x++)

	gfs2_quota_unhold(ip);

static int print_message(struct gfs2_quota_data *qd, char *type)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	line = kmalloc(MAX_LINE, GFP_KERNEL);

	len = snprintf(line, MAX_LINE-1, "GFS2: fsid=%s: quota %s for %s %u\r\n",
		       sdp->sd_fsname, type,
		       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	line[MAX_LINE-1] = 0;

	if (current->signal) { /* Is this test still required? */
		tty_write_message(current->signal->tty, line);

int gfs2_quota_check(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

	for (x = 0; x < al->al_qd_num; x++) {
		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))

		value = qd->qd_qb.qb_value;
		spin_lock(&sdp->sd_quota_spin);
		value += qd->qd_change;
		spin_unlock(&sdp->sd_quota_spin);

		if (qd->qd_qb.qb_limit && (int64_t)qd->qd_qb.qb_limit < value) {
			print_message(qd, "exceeded");
		} else if (qd->qd_qb.qb_warn &&
			   (int64_t)qd->qd_qb.qb_warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;

void gfs2_quota_change(struct gfs2_inode *ip, int64_t change,
		       uint32_t uid, uint32_t gid)
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int found = 0;

	if (gfs2_assert_warn(ip->i_sbd, change))
	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)

	for (x = 0; x < al->al_qd_num; x++) {
		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {

int gfs2_quota_sync(struct gfs2_sbd *sdp)
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);

		error = qd_fish(sdp, qda + num_qd);
		if (error || !qda[num_qd])
		if (++num_qd == max_qd)

			error = do_sync(num_qd, qda);

			for (x = 0; x < num_qd; x++)
				qda[x]->qd_sync_gen =
					sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)

	} while (!error && num_qd == max_qd);

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, uint32_t id)
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;

	error = qd_get(sdp, user, id, CREATE, &qd);

	error = do_glock(qd, FORCE, &q_gh);
		gfs2_glock_dq_uninit(&q_gh);

int gfs2_quota_read(struct gfs2_sbd *sdp, int user, uint32_t id,
		    struct gfs2_quota *q)
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;

	if (((user) ? (id != current->fsuid) : (!in_group_p(id))) &&
	    !capable(CAP_SYS_ADMIN))

	error = qd_get(sdp, user, id, CREATE, &qd);

	error = do_glock(qd, NO_FORCE, &q_gh);

	memset(q, 0, sizeof(struct gfs2_quota));
	q->qu_limit = qd->qd_qb.qb_limit;
	q->qu_warn = qd->qd_qb.qb_warn;
	q->qu_value = qd->qd_qb.qb_value;

	spin_lock(&sdp->sd_quota_spin);
	q->qu_value += qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_glock_dq_uninit(&q_gh);

int gfs2_quota_init(struct gfs2_sbd *sdp)
	struct gfs2_inode *ip = sdp->sd_qc_inode;
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	uint32_t extlen = 0;

	if (!ip->i_di.di_size ||
	    ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_RU(sdp->sd_quota_slots, 8 * PAGE_SIZE);
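
	/* One bitmap chunk per page; with 4 KiB pages each chunk tracks
	   8 * 4096 = 32768 quota-change slots. */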
	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_KERNEL);
	if (!sdp->sd_quota_bitmap)

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_quota_bitmap[x])

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;

		error = gfs2_block_map(ip, x, &new, &dblock, &extlen);

		gfs2_meta_ra(ip->i_gl, dblock, extlen);
		error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,

		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {

		     y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
			struct gfs2_quota_change qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);

	fs_info(sdp, "found %u quota changes\n", found);

	gfs2_quota_cleanup(sdp);

void gfs2_quota_scan(struct gfs2_sbd *sdp)
	struct gfs2_quota_data *qd, *safe;

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);
	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			spin_lock(&sdp->sd_quota_spin);

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);

		spin_lock(&sdp->sd_quota_spin);
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);