/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		flags)
{
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index = 0;
	int		error = 0;
	int		last_error = 0;
	int		fflag = XFS_B_ASYNC;

	if (flags & SYNC_DELWRI)
		fflag = XFS_B_DELWRI;
	if (flags & SYNC_WAIT)
		fflag = 0;		/* synchronous overrides all */

	do {
		struct inode	*inode;
		xfs_inode_t	*ip = NULL;
		int		lock_flags = XFS_ILOCK_SHARED;

		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void**)&ip, first_index, 1);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* nothing to sync during shutdown */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			read_unlock(&pag->pag_ici_lock);
			return 0;
		}

		/*
		 * If we can't get a reference on the inode, it must be
		 * in reclaim. Leave it for the reclaim code to flush.
		 */
		inode = VFS_I(ip);
		if (!igrab(inode)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}
		read_unlock(&pag->pag_ici_lock);

		/* avoid new or bad inodes */
		if (is_bad_inode(inode) ||
		    xfs_iflags_test(ip, XFS_INEW)) {
			IRELE(ip);
			continue;
		}

		/*
		 * If we have to flush data or wait for I/O completion
		 * we need to hold the iolock.
		 */
		if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
			xfs_ilock(ip, XFS_IOLOCK_SHARED);
			lock_flags |= XFS_IOLOCK_SHARED;
			error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
			if (flags & SYNC_IOWAIT)
				xfs_ioend_wait(ip);
		}
		xfs_ilock(ip, XFS_ILOCK_SHARED);

		if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
			if (flags & SYNC_WAIT) {
				xfs_iflock(ip);
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
				else
					xfs_ifunlock(ip);
			} else if (xfs_iflock_nowait(ip)) {
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
				else
					xfs_ifunlock(ip);
			}
		}
		xfs_iput(ip, lock_flags);

		if (error)
			last_error = error;
		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED)
			return XFS_ERROR(error);

	} while (nr_found);

	return last_error;
}

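/*
 * Sync the inodes in every initialised AG of the filesystem. Read-only
 * mounts are skipped entirely, and for SYNC_DELWRI syncs we finish with a
 * log force using the flags computed below.
 */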
int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error;
	int		last_error;
	int		i;
	int		lflags = XFS_LOG_FORCE;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;
	error = 0;
	last_error = 0;

	if (flags & SYNC_WAIT)
		lflags |= XFS_LOG_SYNC;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		error = xfs_sync_inodes_ag(mp, i, flags);
		if (error)
			last_error = error;
		if (error == EFSCORRUPTED)
			break;
	}
	if (flags & SYNC_DELWRI)
		xfs_log_force(mp, 0, lflags);

	return XFS_ERROR(last_error);
}

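/*
 * Log a dummy transaction against the root inode and then force the log.
 * This is used when the log needs covering so that recovery knows the
 * rest of the log is already consistent.
 */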
STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			log_flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/* XXX(hch): ignoring the error here.. */
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_log_force(mp, 0, log_flags);
	return 0;
}

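/*
 * Write out the in-core superblock. For the periodic SYNC_BDFLUSH case we
 * only write it if the buffer can be locked without sleeping and is both
 * dirty and unpinned; otherwise we wait for the buffer as usual.
 */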
int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_BDFLUSH) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	return xfs_bwrite(mp, bp);

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int error;

	/* push non-blocking */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH);
	XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
	xfs_filestream_flush(mp);

	/* push and block */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT);
	XFS_QM_DQSYNC(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, 0);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}

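/*
 * Flush and reclaim dirty inodes and write back dirty metadata buffers,
 * looping until nothing is left pinned, so that the caller can write the
 * unmount record to a clean log.
 */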
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * This loop must run at least twice.  The first instance of the loop
	 * will flush most meta data but that will generate more meta data
	 * (typically directory updates).  Which then must be flushed and
	 * logged before we can write the unmount record.
	 */
	do {
		xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *))
{
	struct bhv_vfs_sync_work *work;

	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	filemap_flush(inode->i_mapping);
	iput(inode);
}

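/*
 * Queue a data writeback of this inode via the xfssyncd thread and give it
 * a little time to make progress before we return to the caller.
 */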
void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	sync_blockdev(mp->m_super->s_bdev);
	iput(inode);
}

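/*
 * Queue a flush of the whole block device via the xfssyncd thread, wait
 * briefly, then force the log synchronously.
 */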
void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
		error = xfs_sync_fsdata(mp, SYNC_BDFLUSH);
		if (xfs_log_need_covered(mp))
			error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

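/*
 * The xfssyncd per-mount thread: wake up every xfs_syncd_centisecs to run
 * xfs_sync_worker(), and process any other work items queued up via
 * xfs_syncd_queue_work() in the meantime.
 */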
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			kmem_free(work);
		}
	}

	return 0;
}

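/* Start the xfssyncd thread for this mount. */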
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

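/* Tear down the xfssyncd thread. */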
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}

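/*
 * Reclaim a single inode: mark it XFS_IRECLAIM, flush it if it is still
 * dirty and then free it. Returns 1 without doing anything if the inode is
 * already being reclaimed or is not marked reclaimable.
 */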
int
xfs_reclaim_inode(
	xfs_inode_t	*ip,
	int		locked,
	int		sync_mode)
{
	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);

	/* The hash lock here protects a thread in xfs_iget_core from
	 * racing with us on linking the inode back with a vnode.
	 * Once we have the XFS_IRECLAIM flag set it will not touch
	 * us.
	 */
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		if (locked) {
			xfs_ifunlock(ip);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(ip->i_mount, pag);

	/*
	 * If the inode is still dirty, then flush it out.  If the inode
	 * is not in the AIL, then it will be OK to flush it delwri as
	 * long as xfs_iflush() does not keep any references to the inode.
	 * We leave that decision up to xfs_iflush() since it has the
	 * knowledge of whether it's OK to simply do a delwri flush of
	 * the inode or whether we need to wait until the inode is
	 * pulled from the AIL.
	 * We get the flush lock regardless, though, just to make sure
	 * we don't free it while it is being flushed.
	 */
	if (!locked) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_iflock(ip);
	}

	/*
	 * In the case of a forced shutdown we rely on xfs_iflush() to
	 * wait for the inode to be unpinned before returning an error.
	 */
	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
		/* synchronize with xfs_iflush_done */
		xfs_iflock(ip);
		xfs_ifunlock(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return 0;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	radix_tree_tag_set(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}

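/*
 * Clear the reclaim tag in the per-AG inode radix tree. Locking is left to
 * the caller (see xfs_inode_clear_reclaim_tag() below).
 */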
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}

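/* Look up the per-AG structure, take the locks and clear the reclaim tag. */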
void
xfs_inode_clear_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}

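/*
 * Walk all reclaim-tagged inodes in the given AG and reclaim them. If we
 * race with an inode that is already being reclaimed, delay briefly and
 * restart the walk from the start of the AG.
 */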
STATIC void
xfs_reclaim_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		noblock,
	int		mode)
{
	xfs_inode_t	*ip = NULL;
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index;
	int		skipped;

restart:
	first_index = 0;
	skipped = 0;
	do {
		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
					(void**)&ip, first_index, 1,
					XFS_ICI_RECLAIM_TAG);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* ignore if already under reclaim */
		if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		if (noblock) {
			if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
			if (xfs_ipincount(ip) ||
			    !xfs_iflock_nowait(ip)) {
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
		}
		read_unlock(&pag->pag_ici_lock);

		/*
		 * hmmm - this is an inode already in reclaim. Do
		 * we even bother catching it here?
		 */
		if (xfs_reclaim_inode(ip, noblock, mode))
			skipped++;
	} while (nr_found);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return;

}

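/* Reclaim tagged inodes from every initialised AG in the filesystem. */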
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		noblock,
	int		mode)
{
	int		i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		xfs_reclaim_inodes_ag(mp, i, noblock, mode);
	}
	return 0;
}