/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

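	/*
	 * The transaction reserves no metadata blocks, but one revoke
	 * per AIL entry, since every buffer removed below is revoked
	 * in the journal.
	 */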
	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		gfs2_remove_from_ail(NULL, bd);
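		/*
		 * Detach the buffer from its bufdata, recording the
		 * block number so the revoke can still be written
		 * after the buffer head is gone.
		 */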
		bd->bd_bh = NULL;
		bh->b_private = NULL;
		bd->bd_blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));
		gfs2_trans_add_revoke(sdp, bd);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;

	if (!ip)
		return;
	inode = &ip->i_inode;
	if (!S_ISREG(inode->i_mode))
		return;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

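	/*
	 * If any PTEs were writable, pages may have been dirtied
	 * through the mapping, so mark the glock dirty so it gets
	 * synced before it is released.
	 */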
	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must
 * not return to the caller to demote/unlock the glock until I/O is
 * complete.
 */

static void meta_go_sync(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

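	/*
	 * Flush the log first so journaled metadata reaches disk, then
	 * write back and wait on this glock's metadata pages, and
	 * finally revoke whatever remains on the AIL.
	 */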
	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
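	/* Bump the version number so holders revalidate cached state. */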
	gl->gl_vn++;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

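	/*
	 * Write ordering matters here: ordered data must go to disk
	 * before the log is flushed, whereas journaled data pages can
	 * only be written back once the log flush has committed them.
	 */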
	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		if (ip && !gfs2_is_jdata(ip))
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_log_flush(gl->gl_sbd, gl);
		if (ip && gfs2_is_jdata(ip))
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_meta_sync(gl);
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			int error = filemap_fdatawait(mapping);
			mapping_set_error(mapping, error);
		}
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh = gl->gl_req_gh;
	struct buffer_head *bh;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    (!gh || !(gh->gh_flags & GL_SKIP))) {
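		/*
		 * Prime the cache with the inode's disk block, unless
		 * the holder asked us to skip that with GL_SKIP.
		 */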
		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
		if (!error)
			brelse(bh);
	}
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote(). Either another node needs the lock in
 * EXCLUSIVE mode, or a lock that has gone unused for too long is being
 * purged from our node's glock cache; either way, we are dropping it.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;
	int meta = (flags & DIO_METADATA);

	if (meta) {
		gfs2_meta_inval(gl);
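		/*
		 * GIF_INVALID forces the next inode_go_lock() to call
		 * gfs2_inode_refresh() before the inode is used again.
		 */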
		if (ip)
			set_bit(GIF_INVALID, &ip->i_flags);
	}

	if (ip && S_ISREG(ip->i_inode.i_mode)) {
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
		clear_bit(GIF_PAGED, &ip->i_flags);
	}
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int demote = 0;

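	/*
	 * Demote if nothing is cached under this glock, or if the lock
	 * has been idle longer than the gt_demote_secs tunable; with
	 * localcaching there is no other node to give the lock to, so
	 * the timeout is skipped.
	 */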
	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
		demote = 1;
	else if (!sdp->sd_args.ar_localcaching &&
		 time_after_eq(jiffies, gl->gl_stamp +
			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
		demote = 1;

	return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

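	/*
	 * If a truncate was interrupted with GFS2_DIF_TRUNC_IN_PROG
	 * still set, resume it now that the glock is held exclusively.
	 */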
	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 * process
 * @gh: the holder
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;

	if (ip)
		gfs2_meta_cache_flush(ip);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 * a first holder on this node.
 * @gh: the holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 * a last holder on this node.
 * @gh: the holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

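	/*
	 * Giving up the transaction glock (e.g. while the filesystem
	 * is being frozen) means no more journaled writes here, so
	 * sync everything out and shut the log down cleanly.
	 */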
	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_cache_flush(ip);
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

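		/*
		 * Re-read our journal head; anything other than a
		 * cleanly unmounted log means the on-disk state is
		 * inconsistent.
		 */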
		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the log's head pointers from the journal head */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching. Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}

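/*
 * Per-type operation vectors; the glock core calls through these when
 * promoting, demoting, locking, and unlocking glocks of each type.
 */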
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_type = LM_TYPE_INODE,
	.go_min_hold_time = HZ / 10,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
	.go_min_hold_time = HZ / 10,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};