/*
 * fs/ocfs2/quota_global.c
 *
 * Implementation of operations over the global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "quota.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);

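/*
 * Conversion helpers between the on-disk format of a global dquot entry
 * (struct ocfs2_global_disk_dqblk, little endian) and the in-memory
 * struct mem_dqblk.  When reading from disk, fields that the
 * administrator has just changed locally (tracked via the DQ_LASTSET_B
 * flag bits) are left untouched so the local settings are not
 * overwritten.
 */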
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(dquot->dq_id);
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
}

static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;
        return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id = ocfs2_global_is_id,
};

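/*
 * Low level buffer helpers: ocfs2_read_quota_block() reads a virtual
 * block of a quota file through the extent map, while
 * ocfs2_get_quota_block() only maps the block to a buffer head without
 * reading it, which is enough for blocks that are about to be
 * completely overwritten.
 */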
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
                           struct buffer_head **bh)
{
        int rc = 0;
        struct buffer_head *tmp = *bh;

        rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0, NULL);
        if (rc)
                mlog_errno(rc);

        /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
        if (!rc && !*bh)
                *bh = tmp;

        return rc;
}

static struct buffer_head *ocfs2_get_quota_block(struct inode *inode,
                                                 int block, int *err)
{
        u64 pblock, pcount;
        struct buffer_head *bh;

        down_read(&OCFS2_I(inode)->ip_alloc_sem);
        *err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount,
                                           NULL);
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
        if (*err) {
                mlog_errno(*err);
                return NULL;
        }
        bh = sb_getblk(inode->i_sb, pblock);
        if (!bh) {
                *err = -EIO;
                mlog_errno(*err);
        }
        return bh;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                len = i_size - off;
        toread = len;
        while (toread > 0) {
                tocopy = min((size_t)(sb->s_blocksize - offset), toread);
                bh = NULL;
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                if (err) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}

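/*
 * Each quota file block keeps OCFS2_QBLK_RESERVED_SPACE bytes at its
 * end reserved, so a single write is never allowed to cross the
 * boundary of the usable area of one block.
 */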
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0, ja_type;
        struct buffer_head *bh = NULL;
        handle_t *handle = journal_current_handle();

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                WARN_ON(1);
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
        if (gqinode->i_size < off + len) {
                down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                err = ocfs2_extend_no_holes(gqinode, off + len, off);
                up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                if (err < 0)
                        goto out;
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               off + len);
                if (err < 0)
                        goto out;
                new = 1;
        }
        /* Not rewriting whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
        } else {
                bh = ocfs2_get_quota_block(gqinode, blk, &err);
                ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
        }
        if (err)        /* Don't return with gqinode->i_mutex held */
                goto out;
        lock_buffer(bh);
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        ocfs2_set_buffer_uptodate(gqinode, bh);
        err = ocfs2_journal_access(handle, gqinode, bh, ja_type);
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        err = ocfs2_journal_dirty(handle, bh);
        brelse(bh);
out:
        if (err) {
                mutex_unlock(&gqinode->i_mutex);
                mlog_errno(err);
                return err;
        }
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        mutex_unlock(&gqinode->i_mutex);
        return len;
}

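/*
 * Access to the global quota file is serialized by a cluster inode
 * lock.  The helpers below also cache the buffer head of the global
 * quota inode in dqi_gqi_bh (with a use count) so that nested lockers
 * all see the same buffer head.
 */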
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}

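/*
 * Besides reading the information header, ocfs2_global_read_info()
 * also sets up the quota tree description and schedules the periodic
 * dquot syncing work using the sync interval stored in the global
 * file.
 */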
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                        GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        int status;

        mlog_entry_void();

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                                              OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                mlog_errno(status);
                goto out_err;
        }
        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                      OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           oinfo->dqi_syncjiff);

out_err:
        mlog_exit(status);
        return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                if (size >= 0)
                        size = -EIO;
                return size;
        }
        return 0;
}

int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                return err;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
        return err;
}

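/*
 * dq_use_count in the global entry counts how many cluster nodes
 * currently use the dquot.  It is incremented when a node reads the
 * dquot in and decremented again when the dquot is synced back while
 * being freed; the global entry is released only once the count drops
 * to zero (see __ocfs2_sync_dquot()).
 */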
/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
        int err, err2, ex = 0;
        struct ocfs2_mem_dqinfo *info =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 0);
        if (err < 0)
                goto out;
        err = qtree_read_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        if (!dquot->dq_off) {   /* No real quota entry? */
                /* Upgrade to exclusive lock for allocation */
                err = ocfs2_qinfo_lock(info, 1);
                if (err < 0)
                        goto out_qlock;
                ex = 1;
        }
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
                err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
                if (!err)
                        err = err2;
        }
out_qlock:
        if (ex)
                ocfs2_qinfo_unlock(info, 1);
        ocfs2_qinfo_unlock(info, 0);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

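/*
 * Usage changes are merged into the global file as deltas: the
 * difference between the current local usage and the values remembered
 * at the last sync (dq_origspace, dq_originodes) is added to whatever
 * is currently stored in the global file, so updates made by other
 * nodes in the meantime are preserved.
 */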
/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                if (err >= 0) {
                        mlog(ML_ERROR, "Short read from global quota file "
                                       "(%u read)\n", err);
                        err = -EIO;
                }
                goto out;
        }

        /* Update space and inode usage. Also get other information from
         * the global quota file so that we don't overwrite any changes
         * made there by other nodes. */
        spin_lock(&dq_data_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                                        OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                                        OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        mlog(0, "Syncing global dquot %d space %lld+%lld, inodes %lld+%lld\n",
             dquot->dq_id, dquot->dq_dqb.dqb_curspace, spacechange,
             dquot->dq_dqb.dqb_curinodes, inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Set properly space grace time... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Set properly inode grace time... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dq_data_lock);
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                               " (type=%d, id=%u)\n", dquot->dq_type,
                               (unsigned)dquot->dq_id);
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
                   dquot->dq_type, type, sb->s_id);
        if (type != dquot->dq_type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        if (status < 0)
                mlog_errno(status);
        /* We have to write local structure as well... */
        dquot_mark_dquot_dirty(dquot);
        status = dquot_commit(dquot);
        if (status < 0)
                mlog_errno(status);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           oinfo->dqi_syncjiff);
}

/*
 * Wrappers for generic quota functions
 */

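/*
 * Each of the wrappers below starts its own journal transaction; those
 * which may modify the global quota file take the global quota file
 * lock before starting the transaction.
 */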
static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        status = dquot_commit(dquot);
        ocfs2_commit_trans(osb, handle);
out:
        mlog_exit(status);
        return status;
}

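/*
 * Number of journal credits needed to release a dquot.  Returns 0 when
 * the corresponding quota feature is not enabled on the filesystem.
 */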
int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo;
        int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
                                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };

        if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
                return 0;

        oinfo = sb_dqinfo(sb, type)->dqi_priv;
        /* We modify tree, leaf block, global info, local chunk header,
         * global and local inode */
        return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
               2 * OCFS2_INODE_UPDATE_CREDITS;
}

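/*
 * Called when the last local reference to a dquot is dropped: takes
 * the global quota file lock, starts a transaction sized by
 * ocfs2_calc_qdel_credits() and lets the generic dquot_release() write
 * out and release the structure.
 */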
static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_release(dquot);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

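/*
 * Number of journal credits needed when a dquot is first set up: both
 * the local and the global quota file may have to be extended, so the
 * estimate is based on the extent lists of both quota inodes.
 */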
int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo;
        int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
                                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
        struct ocfs2_dinode *lfe, *gfe;

        if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
                return 0;

        oinfo = sb_dqinfo(sb, type)->dqi_priv;
        gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
        lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
        /* We can extend local file + global file. In local file we
         * can modify info, chunk header block and dquot block. In
         * global file we can modify info, tree and leaf block */
        return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
               ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
               3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
}

static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
        /* We need an exclusive lock, because we're going to update use count
         * and instantiate possibly new dquot structure */
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_acquire(dquot);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, type);
        dquot_mark_dquot_dirty(dquot);

        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dq_data_lock);
        if (!sync) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        /* Now write updated local dquot structure */
        status = dquot_commit(dquot);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

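/*
 * Write the in-memory quota information (grace times and friends) back
 * to the global file under the global quota file lock and its own
 * transaction.
 */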
/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        mlog_entry_void();

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

/* This is difficult. We have to lock quota inode and start transaction
 * in this function but we don't want to take the penalty of exclusive
 * quota file lock when we are just going to use cached structures. So
 * we just take read lock, check whether we have dquot cached and if so,
 * we don't have to take the write lock... */
static int ocfs2_dquot_initialize(struct inode *inode, int type)
{
        handle_t *handle = NULL;
        int status = 0;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_mem_dqinfo *oinfo;
        int exclusive = 0;
        int cnt;
        qid_t id;

        mlog_entry_void();

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;
                if (!sb_has_quota_active(sb, cnt))
                        continue;
                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                status = ocfs2_lock_global_qf(oinfo, 0);
                if (status < 0)
                        goto out;
                /* This is just a performance optimization, not a reliable
                 * test. Since we hold an inode lock, no one can actually
                 * release the structure until we are finished with
                 * initialization. */
                if (inode->i_dquot[cnt] != NODQUOT) {
                        ocfs2_unlock_global_qf(oinfo, 0);
                        continue;
                }
                /* When we have the inode lock, we know that no
                 * dquot_release() can run and thus we can safely check
                 * whether we need to read+modify the global file to get
                 * quota information or whether our node already has it. */
                if (cnt == USRQUOTA)
                        id = inode->i_uid;
                else if (cnt == GRPQUOTA)
                        id = inode->i_gid;
                else
                        BUG();
                /* Obtain exclusion from quota off... */
                down_write(&sb_dqopt(sb)->dqptr_sem);
                exclusive = !dquot_is_cached(sb, id, cnt);
                up_write(&sb_dqopt(sb)->dqptr_sem);
                if (exclusive) {
                        status = ocfs2_lock_global_qf(oinfo, 1);
                        if (status < 0) {
                                exclusive = 0;
                                mlog_errno(status);
                                goto out_ilock;
                        }
                        handle = ocfs2_start_trans(OCFS2_SB(sb),
                                        ocfs2_calc_qinit_credits(sb, cnt));
                        if (IS_ERR(handle)) {
                                status = PTR_ERR(handle);
                                mlog_errno(status);
                                goto out_ilock;
                        }
                }
                dquot_initialize(inode, cnt);
                if (exclusive) {
                        ocfs2_commit_trans(OCFS2_SB(sb), handle);
                        ocfs2_unlock_global_qf(oinfo, 1);
                }
                ocfs2_unlock_global_qf(oinfo, 0);
        }
        mlog_exit(0);
        return 0;
out_ilock:
        if (exclusive)
                ocfs2_unlock_global_qf(oinfo, 1);
        ocfs2_unlock_global_qf(oinfo, 0);
out:
        mlog_exit(status);
        return status;
}

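/*
 * Slow path of dquot dropping: used when this inode might hold the
 * last reference to a dquot, in which case releasing it can modify
 * the global quota file and thus needs the exclusive global lock and
 * a transaction.
 */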
static int ocfs2_dquot_drop_slow(struct inode *inode)
{
        int status = 0;
        int cnt;
        int got_lock[MAXQUOTAS] = {0, 0};
        handle_t *handle;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_mem_dqinfo *oinfo;

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!sb_has_quota_active(sb, cnt))
                        continue;
                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                status = ocfs2_lock_global_qf(oinfo, 1);
                if (status < 0)
                        goto out;
                got_lock[cnt] = 1;
        }
        handle = ocfs2_start_trans(OCFS2_SB(sb),
                        ocfs2_calc_qinit_credits(sb, USRQUOTA) +
                        ocfs2_calc_qinit_credits(sb, GRPQUOTA));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        dquot_drop(inode);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (got_lock[cnt]) {
                        oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                        ocfs2_unlock_global_qf(oinfo, 1);
                }
        return status;
}

/* See the comment before ocfs2_dquot_initialize. */
static int ocfs2_dquot_drop(struct inode *inode)
{
        int status = 0;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_mem_dqinfo *oinfo;
        int exclusive = 0;
        int cnt;
        int got_lock[MAXQUOTAS] = {0, 0};

        mlog_entry_void();
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!sb_has_quota_active(sb, cnt))
                        continue;
                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                status = ocfs2_lock_global_qf(oinfo, 0);
                if (status < 0)
                        goto out;
                got_lock[cnt] = 1;
        }
        /* Lock against anyone releasing references so that when we check
         * we know we are not going to be the last ones to release the
         * dquot */
        down_write(&sb_dqopt(sb)->dqptr_sem);
        /* Urgh, this is a terrible hack :( */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] != NODQUOT &&
                    atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
                        exclusive = 1;
                        break;
                }
        }
        if (!exclusive)
                dquot_drop_locked(inode);
        up_write(&sb_dqopt(sb)->dqptr_sem);
out:
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (got_lock[cnt]) {
                        oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                        ocfs2_unlock_global_qf(oinfo, 0);
                }
        /* In case we bailed out because we had to do expensive locking,
         * do it now... */
        if (exclusive)
                status = ocfs2_dquot_drop_slow(inode);
        mlog_exit(status);
        return status;
}

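/*
 * ocfs2 embeds struct dquot in its own struct ocfs2_dquot, so dquot
 * allocation and freeing go through the ocfs2_dquot_cachep slab cache.
 */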
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

struct dquot_operations ocfs2_quota_operations = {
        .initialize = ocfs2_dquot_initialize,
        .drop = ocfs2_dquot_drop,
        .alloc_space = dquot_alloc_space,
        .alloc_inode = dquot_alloc_inode,
        .free_space = dquot_free_space,
        .free_inode = dquot_free_inode,
        .transfer = dquot_transfer,
        .write_dquot = ocfs2_write_dquot,
        .acquire_dquot = ocfs2_acquire_dquot,
        .release_dquot = ocfs2_release_dquot,
        .mark_dirty = ocfs2_mark_dquot_dirty,
        .write_info = ocfs2_write_info,
        .alloc_dquot = ocfs2_alloc_dquot,
        .destroy_dquot = ocfs2_destroy_dquot,
};

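/*
 * The "o2quot" workqueue runs the periodic qsync_work_fn() jobs.  It is
 * created once at module setup and drained and destroyed on shutdown.
 */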
int ocfs2_quota_setup(void)
{
        ocfs2_quota_wq = create_workqueue("o2quot");
        if (!ocfs2_quota_wq)
                return -ENOMEM;
        return 0;
}

void ocfs2_quota_shutdown(void)
{
        if (ocfs2_quota_wq) {
                flush_workqueue(ocfs2_quota_wq);
                destroy_workqueue(ocfs2_quota_wq);
                ocfs2_quota_wq = NULL;
        }
}