/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

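/**
 * gfs2_page_add_databufs - Add the page's buffers to the current transaction
 * @ip: The inode
 * @page: The page whose buffers are to be added
 * @from: The start of the range of interest (inclusive)
 * @to: The end of the range of interest (exclusive)
 *
 * Walks each buffer_head attached to @page and adds those overlapping
 * [@from, @to) to the transaction; for jdata inodes the buffers are also
 * marked uptodate. For example, with 4K pages and 1K blocks, from=0 and
 * to=2048 would select the first two of the page's four buffers.
 */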
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

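/**
 * gfs2_get_block_direct - Block map function used for O_DIRECT I/O
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Ignored; the underlying lookup never allocates
 *
 * Unlike gfs2_get_block_noalloc() above, a hole is returned as an
 * unmapped buffer rather than as -EIO, which is what the direct I/O
 * code expects.
 */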
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the caller should go ahead and write the page, otherwise
 *          zero (the page will have been unlocked, and redirtied where
 *          appropriate).
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
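
/*
 * Note the current->journal_info test above: a non-NULL value means this
 * task already has a transaction open, so writing the page now (which may
 * itself need to start a transaction) could recurse into the log. Such
 * pages are redirtied and left for a later writeback pass instead.
 */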

/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writeback_writepage(struct page *page,
				    struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 */

static int gfs2_ordered_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;
	int done_trans = 0;

	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out_ignore;
		ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (ret)
			goto out_ignore;
		done_trans = 1;
	}
	ret = gfs2_writepage_common(page, wbc);
	if (ret > 0)
		ret = __gfs2_jdata_writepage(page, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}
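
/*
 * The writeback paths above hand mpage/nobh the "noalloc" block map: by
 * the time writeback runs, the blocks backing a dirty page should already
 * have been allocated (at write or fault time), so a block that is still
 * unmapped here is reported as -EIO by gfs2_get_block_noalloc().
 */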

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to consider (when not range_cyclic)
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			continue;
		}

		if (!wbc->range_cyclic && page->index > end) {
			ret = 1;
			unlock_page(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			continue;
		}

		/* Is the page fully outside i_size? (truncate in progress) */
		if (page->index > end_index || (page->index == end_index && !offset)) {
			page->mapping->a_ops->invalidatepage(page, 0);
			unlock_page(page);
			continue;
		}

		ret = __gfs2_jdata_writepage(page, wbc);

		if (ret || (--(wbc->nr_to_write) <= 0))
			ret = 1;
	}
	gfs2_trans_end(sdp);
	return ret;
}
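
/*
 * The transaction reservation above is sized to cover every block in the
 * pagevec: for example, with 4K pages and a 4K filesystem block size,
 * nrblocks == nr_pages, i.e. one journaled block per page; with 1K blocks
 * it would be four blocks per page.
 */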

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;
	int scanned = 0;
	int range_whole = 0;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}

retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		scanned = 1;
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;

		pagevec_release(&pvec);
		cond_resched();
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

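/*
 * A "stuffed" file keeps its data in the dinode block itself, directly
 * after the struct gfs2_dinode header, which is why the copy above reads
 * from dibh->b_data + sizeof(struct gfs2_dinode). A file can therefore
 * stay stuffed only while it fits within one block minus the header;
 * beyond that it must be unstuffed into real data blocks (see
 * gfs2_unstuff_dinode() in the write_begin path below).
 */
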
/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code, as in that case we already hold the glock. It is also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}
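
/*
 * The glock must be taken before the page lock, but the VFS calls
 * ->readpage() with the page already locked. Hence the dance above: drop
 * the page lock, take the glock, then retake the page lock. If the page
 * was truncated or filled in by someone else in the meantime,
 * AOP_TRUNCATED_PAGE is returned so that the VFS retries the operation.
 */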

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or an error code
 */

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		/* Clamp the copy to the end of the current page */
		if (offset + amt > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page, KM_USER0);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p, KM_USER0);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	page_cache_release(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
out_alloc_put:
		gfs2_alloc_put(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
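
/*
 * The rblocks reservation above adds up, roughly: one block for the
 * dinode, the indirect blocks the mapping may need, the data blocks
 * themselves when journaling data, plus statfs/quota overhead whenever
 * an allocation takes place. As a worked example (4K blocks, jdata, a
 * one-block write needing no new indirect blocks):
 * rblocks = RES_DINODE + 0 + 1 + RES_STATFS + RES_QUOTA.
 */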

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: The number of bytes copied
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		gfs2_dinode_out(ip, di);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		ip->i_gh.gh_flags |= GL_NOCACHE;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret > 0) {
		gfs2_dinode_out(ip, dibh->b_data);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		ip->i_gh.gh_flags |= GL_NOCACHE;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
failed:
	if (al) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
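
/*
 * PageChecked is used here as a "needs a transaction" flag: pages dirtied
 * through this path are tagged, and the jdata writepage code (see
 * gfs2_jdata_writepage() and __gfs2_jdata_writepage() above) then opens a
 * transaction and journals the page's buffers before writing it back.
 */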

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

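/**
 * gfs2_discard - Detach a buffer from the journal before it is thrown away
 * @sdp: The superblock
 * @bh: The buffer_head being discarded
 *
 * Clears the buffer's dirty/mapped state and drops any journal bufdata
 * still attached to it, so that invalidating the page cannot leave the
 * log with a stale reference.
 */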
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_le.le_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (offset == 0)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (offset == 0)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}

static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, gfs2_get_block_direct,
				  NULL, NULL, 0);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_ail)
			goto cannot_release;
		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));
		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			if (!list_empty(&bd->bd_le.le_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_le.le_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writeback_writepage,
	.writepages = gfs2_writeback_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_ordered_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

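/**
 * gfs2_set_aops - Select the address space operations for an inode
 * @inode: The inode
 *
 * Picks the aops matching the inode's data journaling mode: plain
 * writeback, ordered data, or fully journaled data (jdata).
 */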
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}