/*
 * linux/fs/ufs/util.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ufs_fs.h>
#include <linux/buffer_head.h>

#include "swab.h"
#include "util.h"

struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
	struct super_block *sb, u64 fragment, u64 size)
{
	struct ufs_buffer_head * ubh;
	unsigned i, j;
	u64 count = 0;
	if (size & ~uspi->s_fmask)
		return NULL;
	count = size >> uspi->s_fshift;
	if (count > UFS_MAXFRAG)
		return NULL;
	ubh = (struct ufs_buffer_head *)
		kmalloc (sizeof (struct ufs_buffer_head), GFP_KERNEL);
	if (!ubh)
		return NULL;
	ubh->fragment = fragment;
	ubh->count = count;
	for (i = 0; i < count; i++)
		if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
			goto failed;
	for (; i < UFS_MAXFRAG; i++)
		ubh->bh[i] = NULL;
	return ubh;
failed:
	for (j = 0; j < i; j++)
		brelse (ubh->bh[j]);
	kfree(ubh);
	return NULL;
}

struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
	struct super_block *sb, u64 fragment, u64 size)
{
	unsigned i, j;
	u64 count = 0;
	if (size & ~uspi->s_fmask)
		return NULL;
	count = size >> uspi->s_fshift;
	if (count <= 0 || count > UFS_MAXFRAG)
		return NULL;
	USPI_UBH(uspi)->fragment = fragment;
	USPI_UBH(uspi)->count = count;
	for (i = 0; i < count; i++)
		if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
			goto failed;
	for (; i < UFS_MAXFRAG; i++)
		USPI_UBH(uspi)->bh[i] = NULL;
	return USPI_UBH(uspi);
failed:
	for (j = 0; j < i; j++)
		brelse (USPI_UBH(uspi)->bh[j]);
	return NULL;
}

void ubh_brelse (struct ufs_buffer_head * ubh)
{
	unsigned i;
	if (!ubh)
		return;
	for (i = 0; i < ubh->count; i++)
		brelse (ubh->bh[i]);
	kfree (ubh);
}

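/*
 * Illustrative usage sketch, not part of the original file: the helpers
 * above are meant to be used in pairs.  A minimal caller, assuming that
 * "uspi", "sb", "fragno" and "bytes" are supplied by that caller:
 *
 *	struct ufs_buffer_head *ubh;
 *
 *	ubh = _ubh_bread_(uspi, sb, fragno, bytes);
 *	if (!ubh)
 *		return -EIO;
 *	... read or modify ubh->bh[i]->b_data for i < ubh->count ...
 *	ubh_mark_buffer_dirty(ubh);
 *	ubh_brelse(ubh);
 *
 * ubh_bread_uspi()/ubh_brelse_uspi() follow the same pattern, but operate
 * on the buffer head referenced by USPI_UBH(uspi) rather than on a freshly
 * kmalloc'ed one.
 */
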
void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
{
	unsigned i;
	if (!USPI_UBH(uspi))
		return;
	for ( i = 0; i < USPI_UBH(uspi)->count; i++ ) {
		brelse (USPI_UBH(uspi)->bh[i]);
		USPI_UBH(uspi)->bh[i] = NULL;
	}
}

void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
{
	unsigned i;
	if (!ubh)
		return;
	for ( i = 0; i < ubh->count; i++ )
		mark_buffer_dirty (ubh->bh[i]);
}

void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
{
	unsigned i;
	if (!ubh)
		return;
	if (flag) {
		for ( i = 0; i < ubh->count; i++ )
			set_buffer_uptodate (ubh->bh[i]);
	} else {
		for ( i = 0; i < ubh->count; i++ )
			clear_buffer_uptodate (ubh->bh[i]);
	}
}

void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh)
{
	if (!ubh)
		return;

	ll_rw_block(rw, ubh->count, ubh->bh);
}

void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
{
	unsigned i;
	if (!ubh)
		return;
	for ( i = 0; i < ubh->count; i++ )
		wait_on_buffer (ubh->bh[i]);
}

void ubh_bforget (struct ufs_buffer_head * ubh)
{
	unsigned i;
	if (!ubh)
		return;
	for ( i = 0; i < ubh->count; i++ ) if ( ubh->bh[i] )
		bforget (ubh->bh[i]);
}

int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
{
	unsigned i;
	unsigned result = 0;
	if (!ubh)
		return 0;
	for ( i = 0; i < ubh->count; i++ )
		result |= buffer_dirty(ubh->bh[i]);
	return result;
}

void _ubh_ubhcpymem_(struct ufs_sb_private_info * uspi,
	unsigned char * mem, struct ufs_buffer_head * ubh, unsigned size)
{
	unsigned len, bhno;
	if (size > (ubh->count << uspi->s_fshift))
		size = ubh->count << uspi->s_fshift;
	bhno = 0;
	while (size) {
		len = min_t(unsigned int, size, uspi->s_fsize);
		memcpy (mem, ubh->bh[bhno]->b_data, len);
		mem += uspi->s_fsize;
		size -= len;
		bhno++;
	}
}

void _ubh_memcpyubh_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned char * mem, unsigned size)
{
	unsigned len, bhno;
	if (size > (ubh->count << uspi->s_fshift))
		size = ubh->count << uspi->s_fshift;
	bhno = 0;
	while (size) {
		len = min_t(unsigned int, size, uspi->s_fsize);
		memcpy (ubh->bh[bhno]->b_data, mem, len);
		mem += uspi->s_fsize;
		size -= len;
		bhno++;
	}
}

dev_t
ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi)
{
	__u32 fs32;
	dev_t dev;

	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[1]);
	else
		fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[0]);
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNx86:
	case UFS_ST_SUN:
		if ((fs32 & 0xffff0000) == 0 ||
		    (fs32 & 0xffff0000) == 0xffff0000)
			dev = old_decode_dev(fs32 & 0x7fff);
		else
			dev = MKDEV(sysv_major(fs32), sysv_minor(fs32));
		break;

	default:
		dev = old_decode_dev(fs32);
		break;
	}
	return dev;
}

void
ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev)
{
	__u32 fs32;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNx86:
	case UFS_ST_SUN:
		fs32 = sysv_encode_dev(dev);
		if ((fs32 & 0xffff8000) == 0) {
			fs32 = old_encode_dev(dev);
		}
		break;

	default:
		fs32 = old_encode_dev(dev);
		break;
	}
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		ufsi->i_u1.i_data[1] = cpu_to_fs32(sb, fs32);
	else
		ufsi->i_u1.i_data[0] = cpu_to_fs32(sb, fs32);
}

/**
 * ufs_get_locked_page() - locate, pin and lock a pagecache page; if it is
 * not already cached, read it in from disk.
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Locates the desired pagecache page, reading it from disk if necessary,
 * locks it, increments its reference count and returns its address.
 */

struct page *ufs_get_locked_page(struct address_space *mapping,
				 pgoff_t index)
{
	struct page *page;

	page = find_lock_page(mapping, index);
	if (!page) {
		page = read_cache_page(mapping, index,
				       (filler_t*)mapping->a_ops->readpage,
				       NULL);

		if (IS_ERR(page)) {
			printk(KERN_ERR "ufs_change_blocknr: "
			       "read_cache_page error: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);
			goto out;
		}

		lock_page(page);

		if (unlikely(page->mapping == NULL)) {
			/* Truncate got there first */
			unlock_page(page);
			page_cache_release(page);
			page = NULL;
			goto out;
		}

		if (!PageUptodate(page) || PageError(page)) {
			unlock_page(page);
			page_cache_release(page);

			printk(KERN_ERR "ufs_change_blocknr: "
			       "can not read page: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);

			page = ERR_PTR(-EIO);
		}
	}
out:
	return page;
}
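
/*
 * Illustrative caller sketch, not part of the original file, with "mapping"
 * and "index" assumed to come from the caller: the page returned by
 * ufs_get_locked_page() is locked and holds an extra reference, so every
 * successful lookup must be balanced with unlock_page() and
 * page_cache_release().
 *
 *	struct page *page;
 *
 *	page = ufs_get_locked_page(mapping, index);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	if (!page)
 *		return 0;	(truncate removed the page; nothing to do)
 *	... inspect or modify the page's buffers here ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */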