]> bbs.cooldavid.org Git - net-next-2.6.git/blame - fs/gfs2/glops.c
[GFS2] Update copyright, tidy up incore.h
[net-next-2.6.git] / fs / gfs2 / glops.c
CommitLineData
b3b94faa
DT
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3a8a9a10 3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
b3b94faa
DT
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
e9fc2aa0 7 * of the GNU General Public License version 2.
b3b94faa
DT
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
5c676f6d 15#include <linux/gfs2_ondisk.h>
b3b94faa
DT
16
17#include "gfs2.h"
5c676f6d
SW
18#include "lm_interface.h"
19#include "incore.h"
b3b94faa
DT
20#include "bmap.h"
21#include "glock.h"
22#include "glops.h"
23#include "inode.h"
24#include "log.h"
25#include "meta_io.h"
b3b94faa
DT
26#include "recovery.h"
27#include "rgrp.h"
5c676f6d 28#include "util.h"
b3b94faa 29
ba7f7290
SW
30
31/**
32 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
33 * @gl: the glock
34 *
35 */
36
37static void gfs2_pte_inval(struct gfs2_glock *gl)
38{
39 struct gfs2_inode *ip;
40 struct inode *inode;
41
42 ip = gl->gl_object;
43 inode = &ip->i_inode;
44 if (!ip || !S_ISREG(ip->i_di.di_mode))
45 return;
46
47 if (!test_bit(GIF_PAGED, &ip->i_flags))
48 return;
49
50 unmap_shared_mapping_range(inode->i_mapping, 0, 0);
51
52 if (test_bit(GIF_SW_PAGED, &ip->i_flags))
53 set_bit(GLF_DIRTY, &gl->gl_flags);
54
55 clear_bit(GIF_SW_PAGED, &ip->i_flags);
56}
57
58/**
59 * gfs2_page_inval - Invalidate all pages associated with a glock
60 * @gl: the glock
61 *
62 */
63
64static void gfs2_page_inval(struct gfs2_glock *gl)
65{
66 struct gfs2_inode *ip;
67 struct inode *inode;
68
69 ip = gl->gl_object;
70 inode = &ip->i_inode;
71 if (!ip || !S_ISREG(ip->i_di.di_mode))
72 return;
73
74 truncate_inode_pages(inode->i_mapping, 0);
75 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
76 clear_bit(GIF_PAGED, &ip->i_flags);
77}
78
79/**
80 * gfs2_page_sync - Sync the data pages (not metadata) associated with a glock
81 * @gl: the glock
82 * @flags: DIO_START | DIO_WAIT
83 *
84 * Syncs data (not metadata) for a regular file.
85 * No-op for all other types.
86 */
87
88static void gfs2_page_sync(struct gfs2_glock *gl, int flags)
89{
90 struct gfs2_inode *ip;
91 struct inode *inode;
92 struct address_space *mapping;
93 int error = 0;
94
95 ip = gl->gl_object;
96 inode = &ip->i_inode;
97 if (!ip || !S_ISREG(ip->i_di.di_mode))
98 return;
99
100 mapping = inode->i_mapping;
101
102 if (flags & DIO_START)
103 filemap_fdatawrite(mapping);
104 if (!error && (flags & DIO_WAIT))
105 error = filemap_fdatawait(mapping);
106
107 /* Put back any errors cleared by filemap_fdatawait()
108 so they can be caught by someone who can pass them
109 up to user space. */
110
111 if (error == -ENOSPC)
112 set_bit(AS_ENOSPC, &mapping->flags);
113 else if (error)
114 set_bit(AS_EIO, &mapping->flags);
115
116}
117
b3b94faa
DT
118/**
119 * meta_go_sync - sync out the metadata for this glock
120 * @gl: the glock
121 * @flags: DIO_*
122 *
123 * Called when demoting or unlocking an EX glock. We must flush
124 * to disk all dirty buffers/pages relating to this glock, and must not
125 * not return to caller to demote/unlock the glock until I/O is complete.
126 */
127
128static void meta_go_sync(struct gfs2_glock *gl, int flags)
129{
130 if (!(flags & DIO_METADATA))
131 return;
132
133 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
b09e593d 134 gfs2_log_flush(gl->gl_sbd, gl);
b3b94faa
DT
135 gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
136 if (flags & DIO_RELEASE)
137 gfs2_ail_empty_gl(gl);
138 }
139
b3b94faa
DT
140}
141
142/**
143 * meta_go_inval - invalidate the metadata for this glock
144 * @gl: the glock
145 * @flags:
146 *
147 */
148
149static void meta_go_inval(struct gfs2_glock *gl, int flags)
150{
151 if (!(flags & DIO_METADATA))
152 return;
153
154 gfs2_meta_inval(gl);
155 gl->gl_vn++;
156}
157
b3b94faa
DT
/**
 * inode_go_xmote_th - promote/demote an inode glock
 * @gl: the glock
 * @state: the requested state
 * @flags: modifier flags passed through to the generic state change
 *
 * If the glock currently holds a lock (state is not UNLOCKED), PTEs
 * mapping the file are synced/invalidated first so another node cannot
 * observe stale mappings after the state change.
 */

static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
			      int flags)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	gfs2_glock_xmote_th(gl, state, flags);
}
173
174/**
175 * inode_go_xmote_bh - After promoting/demoting a glock
176 * @gl: the glock
177 *
178 */
179
180static void inode_go_xmote_bh(struct gfs2_glock *gl)
181{
182 struct gfs2_holder *gh = gl->gl_req_gh;
183 struct buffer_head *bh;
184 int error;
185
186 if (gl->gl_state != LM_ST_UNLOCKED &&
187 (!gh || !(gh->gh_flags & GL_SKIP))) {
188 error = gfs2_meta_read(gl, gl->gl_name.ln_number, DIO_START,
189 &bh);
190 if (!error)
191 brelse(bh);
192 }
193}
194
/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long)
 * is being purged from our node's glock cache; we're dropping lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	/* Invalidate PTEs before the generic drop, mirroring
	   inode_go_xmote_th. */
	gfs2_pte_inval(gl);
	gfs2_glock_drop_th(gl);
}
209
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 * @flags: DIO_METADATA and/or DIO_DATA select what to sync;
 *         DIO_RELEASE additionally empties the AIL for this glock
 *
 * NOTE: the ordering below is deliberate.  When both data and metadata
 * are synced, data writeback is started first, then the log is flushed
 * and metadata synced, and only afterwards do we wait for the data I/O.
 * GLF_DIRTY is cleared only on the full (meta+data) sync path.
 */

static void inode_go_sync(struct gfs2_glock *gl, int flags)
{
	int meta = (flags & DIO_METADATA);
	int data = (flags & DIO_DATA);

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		if (meta && data) {
			/* Start data writeback, flush log + metadata,
			   then wait for the data writes to finish. */
			gfs2_page_sync(gl, flags | DIO_START);
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
			gfs2_page_sync(gl, flags | DIO_WAIT);
			clear_bit(GLF_DIRTY, &gl->gl_flags);
		} else if (meta) {
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
		} else if (data)
			gfs2_page_sync(gl, flags | DIO_START | DIO_WAIT);
		if (flags & DIO_RELEASE)
			gfs2_ail_empty_gl(gl);
	}
}
239
240/**
241 * inode_go_inval - prepare a inode glock to be released
242 * @gl: the glock
243 * @flags:
244 *
245 */
246
247static void inode_go_inval(struct gfs2_glock *gl, int flags)
248{
249 int meta = (flags & DIO_METADATA);
250 int data = (flags & DIO_DATA);
251
252 if (meta) {
253 gfs2_meta_inval(gl);
254 gl->gl_vn++;
255 }
256 if (data)
257 gfs2_page_inval(gl);
258}
259
260/**
261 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
262 * @gl: the glock
263 *
264 * Returns: 1 if it's ok
265 */
266
267static int inode_go_demote_ok(struct gfs2_glock *gl)
268{
269 struct gfs2_sbd *sdp = gl->gl_sbd;
270 int demote = 0;
271
5c676f6d 272 if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
b3b94faa
DT
273 demote = 1;
274 else if (!sdp->sd_args.ar_localcaching &&
275 time_after_eq(jiffies, gl->gl_stamp +
276 gfs2_tune_get(sdp, gt_demote_secs) * HZ))
277 demote = 1;
278
279 return demote;
280}
281
282/**
283 * inode_go_lock - operation done after an inode lock is locked by a process
284 * @gl: the glock
285 * @flags:
286 *
287 * Returns: errno
288 */
289
290static int inode_go_lock(struct gfs2_holder *gh)
291{
292 struct gfs2_glock *gl = gh->gh_gl;
5c676f6d 293 struct gfs2_inode *ip = gl->gl_object;
b3b94faa
DT
294 int error = 0;
295
296 if (!ip)
297 return 0;
298
299 if (ip->i_vn != gl->gl_vn) {
300 error = gfs2_inode_refresh(ip);
301 if (error)
302 return error;
303 gfs2_inode_attr_in(ip);
304 }
305
306 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
307 (gl->gl_state == LM_ST_EXCLUSIVE) &&
308 (gh->gh_flags & GL_LOCAL_EXCL))
309 error = gfs2_truncatei_resume(ip);
310
311 return error;
312}
313
314/**
315 * inode_go_unlock - operation done before an inode lock is unlocked by a
316 * process
317 * @gl: the glock
318 * @flags:
319 *
320 */
321
322static void inode_go_unlock(struct gfs2_holder *gh)
323{
324 struct gfs2_glock *gl = gh->gh_gl;
5c676f6d 325 struct gfs2_inode *ip = gl->gl_object;
b3b94faa 326
f45b7ddd
SW
327 if (ip) {
328 if (test_bit(GLF_DIRTY, &gl->gl_flags))
329 gfs2_inode_attr_in(ip);
b3b94faa 330
b3b94faa 331 gfs2_meta_cache_flush(ip);
f45b7ddd 332 }
b3b94faa
DT
333}
334
/**
 * inode_greedy - adjust an inode's "greedy" hold time after a page fault
 * @gl: the glock
 *
 * If page faults are arriving faster than one per current quantum, the
 * greedy time grows (capped at gt_greedy_max); otherwise it shrinks.
 * Drops the inode reference taken by the caller when done.
 */

static void inode_greedy(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
	unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
	unsigned int new_time;

	/* i_spin protects i_last_pfault and i_greedy. */
	spin_lock(&ip->i_spin);

	if (time_after(ip->i_last_pfault + quantum, jiffies)) {
		/* Faulting frequently: grow the greedy time, clamped. */
		new_time = ip->i_greedy + quantum;
		if (new_time > max)
			new_time = max;
	} else {
		/* Shrink; unsigned subtraction may wrap to a huge value
		   when quantum > i_greedy, which the "> max" test
		   catches, clamping the result to the minimum of 1. */
		new_time = ip->i_greedy - quantum;
		if (!new_time || new_time > max)
			new_time = 1;
	}

	ip->i_greedy = new_time;

	spin_unlock(&ip->i_spin);

	/* Release the reference held across the greedy callback. */
	iput(&ip->i_inode);
}
367
/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Demotion is allowed only once the rgrp's address space holds no pages.
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}
379
/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 * a first holder on this node.
 * @gh: the holder
 *
 * Reads the resource group's buffers into memory.
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}
393
/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 * a last holder on this node.
 * @gh: the holder
 *
 * Releases the buffers acquired by rgrp_go_lock().
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}
406
407/**
408 * trans_go_xmote_th - promote/demote the transaction glock
409 * @gl: the glock
410 * @state: the requested state
411 * @flags:
412 *
413 */
414
415static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
416 int flags)
417{
418 struct gfs2_sbd *sdp = gl->gl_sbd;
419
420 if (gl->gl_state != LM_ST_UNLOCKED &&
421 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
422 gfs2_meta_syncfs(sdp);
423 gfs2_log_shutdown(sdp);
424 }
425
426 gfs2_glock_xmote_th(gl, state, flags);
427}
428
429/**
430 * trans_go_xmote_bh - After promoting/demoting the transaction glock
431 * @gl: the glock
432 *
433 */
434
435static void trans_go_xmote_bh(struct gfs2_glock *gl)
436{
437 struct gfs2_sbd *sdp = gl->gl_sbd;
feaa7bba 438 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
5c676f6d 439 struct gfs2_glock *j_gl = ip->i_gl;
b3b94faa
DT
440 struct gfs2_log_header head;
441 int error;
442
443 if (gl->gl_state != LM_ST_UNLOCKED &&
444 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
feaa7bba 445 gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
b3b94faa
DT
446 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);
447
448 error = gfs2_find_jhead(sdp->sd_jdesc, &head);
449 if (error)
450 gfs2_consist(sdp);
451 if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
452 gfs2_consist(sdp);
453
454 /* Initialize some head of the log stuff */
455 if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
456 sdp->sd_log_sequence = head.lh_sequence + 1;
457 gfs2_log_pointers_init(sdp, head.lh_blkno);
458 }
459 }
460}
461
/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching. Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* Flush metadata and shut the log down before dropping, but
	   only while the journal is live. */
	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}

	gfs2_glock_drop_th(gl);
}
481
/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * A quota glock may be demoted only when no LVB references remain.
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}
493
/* Per-type glock operation tables.  Types without a specialized hook
   fall back to the generic gfs2_glock_xmote_th/gfs2_glock_drop_th. */

/* Plain metadata glocks: generic promote/demote only. */
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_META
};

/* Inode glocks: full set of sync/invalidate/lock hooks defined above. */
const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_greedy = inode_greedy,
	.go_type = LM_TYPE_INODE
};

/* Resource-group glocks: metadata sync/inval plus rgrp buffer caching. */
const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_sync = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP
};

/* Transaction glock: journal shutdown/replay hooks; a nondisk lock. */
const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK
};

/* iopen glocks: generic ops plus the iopen callback. */
const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_callback = gfs2_iopen_go_callback,
	.go_type = LM_TYPE_IOPEN
};

/* flock glocks: generic ops only. */
const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_FLOCK
};

/* Miscellaneous non-disk glocks: generic ops only. */
const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_NONDISK
};

/* Quota glocks: demotion gated on outstanding LVB references. */
const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA
};

/* Journal glocks: generic ops only. */
const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_JOURNAL
};
562