/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  minix directory handling functions
 *
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>

typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

static int minix_readdir(struct file *, void *, filldir_t);

const struct file_operations minix_dir_operations = {
	.read		= generic_read_dir,
	.readdir	= minix_readdir,
	.fsync		= simple_fsync,
};

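/*
 * Undo dir_get_page(): drop the kmap() and the page cache reference.
 */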
static inline void dir_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = PAGE_CACHE_SIZE;

	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
	return last_byte;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

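/*
 * Finish writing a directory chunk prepared with __minix_write_begin():
 * complete the block write, extend i_size if the chunk grew the
 * directory, then write the page synchronously for IS_DIRSYNC
 * directories or just unlock it otherwise.
 */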
static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;
	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}

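/*
 * Read directory page 'n' through the page cache and kmap() it.
 * Returns an ERR_PTR on failure; on success the caller must release
 * the page with dir_put_page().
 */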
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (!PageUptodate(page))
			goto fail;
	}
	return page;

fail:
	dir_put_page(page);
	return ERR_PTR(-EIO);
}

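/* Step to the next directory entry; entries are sbi->s_dirsize bytes apart. */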
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
	return (void*)((char*)de + sbi->s_dirsize);
}

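/*
 * readdir: walk the directory page by page and hand every in-use entry
 * (non-zero inode number) to filldir(), taking care of both the V3 and
 * the older directory entry layouts.
 */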
static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned long pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;
	char *name;
	__u32 inumber;

	lock_kernel();

	pos = (pos + chunk_size-1) & ~(chunk_size-1);
	if (pos >= inode->i_size)
		goto done;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct page *page = dir_get_page(inode, n);

		if (IS_ERR(page))
			continue;
		kaddr = (char *)page_address(page);
		p = kaddr+offset;
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}
			if (inumber) {
				int over;

				unsigned l = strnlen(name, sbi->s_namelen);
				offset = p - kaddr;
				over = filldir(dirent, name, l,
					(n << PAGE_CACHE_SHIFT) | offset,
					inumber, DT_UNKNOWN);
				if (over) {
					dir_put_page(page);
					goto done;
				}
			}
		}
		dir_put_page(page);
	}

done:
	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
	unlock_kernel();
	return 0;
}

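/*
 * Compare a name of length 'len' with an on-disk name field of up to
 * 'maxlen' bytes: the stored name must not extend past 'len' and the
 * first 'len' bytes must match.
 */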
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	if (len < maxlen && buffer[len])
		return 0;
	return !memcmp(name, buffer, len);
}

/*
 *	minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name.
 * It returns the entry itself and, through res_page, the page in
 * which the entry was found. It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = dentry->d_parent->d_inode;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	char *p;

	char *namx;
	__u32 inumber;
	*res_page = NULL;

	for (n = 0; n < npages; n++) {
		char *kaddr, *limit;

		page = dir_get_page(dir, n);
		if (IS_ERR(page))
			continue;

		kaddr = (char*)page_address(page);
		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				namx = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				namx = de->name;
				inumber = de->inode;
			}
			if (!inumber)
				continue;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto found;
		}
		dir_put_page(page);
	}
	return NULL;

found:
	*res_page = page;
	return (minix_dirent *)p;
}

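/*
 * Add the name in 'dentry', pointing at 'inode', to the parent
 * directory: reuse an empty slot if one exists, report -EEXIST for a
 * duplicate name, or extend the directory by one chunk past i_size.
 */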
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct page *page = NULL;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr, *p;
	minix_dirent *de;
	minix3_dirent *de3;
	loff_t pos;
	int err;
	char *namx = NULL;
	__u32 inumber;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *limit, *dir_end;

		page = dir_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = (char*)page_address(page);
		dir_end = kaddr + minix_last_byte(dir, n);
		limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			de = (minix_dirent *)p;
			de3 = (minix3_dirent *)p;
			if (sbi->s_version == MINIX_V3) {
				namx = de3->name;
				inumber = de3->inode;
			} else {
				namx = de->name;
				inumber = de->inode;
			}
			if (p == dir_end) {
				/* We hit i_size */
				if (sbi->s_version == MINIX_V3)
					de3->inode = 0;
				else
					de->inode = 0;
				goto got_it;
			}
			if (!inumber)
				goto got_it;
			err = -EEXIST;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto out_unlock;
		}
		unlock_page(page);
		dir_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) + p - (char *)page_address(page);
	err = __minix_write_begin(NULL, page->mapping, pos, sbi->s_dirsize,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err)
		goto out_unlock;
	memcpy (namx, name, namelen);
	if (sbi->s_version == MINIX_V3) {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
		de3->inode = inode->i_ino;
	} else {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
		de->inode = inode->i_ino;
	}
	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
out_put:
	dir_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

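/*
 * Remove a directory entry by zeroing its on-disk inode number and
 * writing the chunk back; also updates the directory's mtime/ctime.
 */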
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = (struct inode*)mapping->host;
	char *kaddr = page_address(page);
	loff_t pos = page_offset(page) + (char*)de - kaddr;
	unsigned len = minix_sb(inode->i_sb)->s_dirsize;
	int err;

	lock_page(page);
	err = __minix_write_begin(NULL, mapping, pos, len,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err == 0) {
		de->inode = 0;
		err = dir_commit_chunk(page, pos, len);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return err;
}

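/*
 * Fill page 0 of a new directory with its "." and ".." entries.
 */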
int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = __minix_write_begin(NULL, mapping, 0, 2 * sbi->s_dirsize,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);

	if (sbi->s_version == MINIX_V3) {
		minix3_dirent *de3 = (minix3_dirent *)kaddr;

		de3->inode = inode->i_ino;
		strcpy(de3->name, ".");
		de3 = minix_next_entry(de3, sbi);
		de3->inode = dir->i_ino;
		strcpy(de3->name, "..");
	} else {
		minix_dirent *de = (minix_dirent *)kaddr;

		de->inode = inode->i_ino;
		strcpy(de->name, ".");
		de = minix_next_entry(de, sbi);
		de->inode = dir->i_ino;
		strcpy(de->name, "..");
	}
	kunmap_atomic(kaddr, KM_USER0);

	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
	page_cache_release(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *name;
	__u32 inumber;

	for (i = 0; i < npages; i++) {
		char *p, *kaddr, *limit;

		page = dir_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = (char *)page_address(page);
		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}

			if (inumber != 0) {
				/* check for . and .. */
				if (name[0] != '.')
					goto not_empty;
				if (!name[1]) {
					if (inumber != inode->i_ino)
						goto not_empty;
				} else if (name[1] != '.')
					goto not_empty;
				else if (name[2])
					goto not_empty;
			}
		}
		dir_put_page(page);
	}
	return 1;

not_empty:
	dir_put_page(page);
	return 0;
}

/* Releases the page */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	loff_t pos = page_offset(page) +
			(char *)de-(char*)page_address(page);
	int err;

	lock_page(page);

	err = __minix_write_begin(NULL, mapping, pos, sbi->s_dirsize,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err == 0) {
		de->inode = inode->i_ino;
		err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}

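/*
 * Return the ".." entry, i.e. the second entry of directory page 0;
 * the page itself is handed back through *p.
 */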
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
	struct page *page = dir_get_page(dir, 0);
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	struct minix_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = minix_next_entry(page_address(page), sbi);
		*p = page;
	}
	return de;
}

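/*
 * Look the dentry's name up in its parent directory and return the
 * inode number found there, or 0 if the name does not exist.
 */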
ino_t minix_inode_by_name(struct dentry *dentry)
{
	struct page *page;
	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
	ino_t res = 0;

	if (de) {
		res = de->inode;
		dir_put_page(page);
	}
	return res;
}