/* (removed: git-blame table header residue from listing extraction) */
1 | /* |
2 | * fs/cifs/cifsfs.c | |
3 | * | |
4 | * Copyright (C) International Business Machines Corp., 2002,2004 | |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | |
6 | * | |
7 | * Common Internet FileSystem (CIFS) client | |
8 | * | |
9 | * This library is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU Lesser General Public License as published | |
11 | * by the Free Software Foundation; either version 2.1 of the License, or | |
12 | * (at your option) any later version. | |
13 | * | |
14 | * This library is distributed in the hope that it will be useful, | |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | |
17 | * the GNU Lesser General Public License for more details. | |
18 | * | |
19 | * You should have received a copy of the GNU Lesser General Public License | |
20 | * along with this library; if not, write to the Free Software | |
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
22 | */ | |
23 | ||
24 | /* Note that BB means BUGBUG (ie something to fix eventually) */ | |
25 | ||
26 | #include <linux/module.h> | |
27 | #include <linux/fs.h> | |
28 | #include <linux/mount.h> | |
29 | #include <linux/slab.h> | |
30 | #include <linux/init.h> | |
31 | #include <linux/list.h> | |
32 | #include <linux/seq_file.h> | |
33 | #include <linux/vfs.h> | |
34 | #include <linux/mempool.h> | |
35 | #include "cifsfs.h" | |
36 | #include "cifspdu.h" | |
37 | #define DECLARE_GLOBALS_HERE | |
38 | #include "cifsglob.h" | |
39 | #include "cifsproto.h" | |
40 | #include "cifs_debug.h" | |
41 | #include "cifs_fs_sb.h" | |
42 | #include <linux/mm.h> | |
43 | #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ | |
44 | ||
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;	/* defined near bottom of file */
#endif

/* Global debug and tuning knobs; several are exposed as module parameters. */
int cifsFYI = 0;		/* informational (FYI) debug messages */
int cifsERROR = 1;		/* error-level messages on by default */
int traceSMB = 0;		/* dump raw SMB frames when set */
unsigned int oplockEnabled = 1;		/* allow oplocks (client caching) */
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;	/* use CIFS Unix extensions if offered */
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = 0;
unsigned int ntlmv2_support = 0;
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct * oplockThread; /* remove sparse warning */
struct task_struct * oplockThread = NULL;	/* oplock break kernel thread */
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize,"Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv,"Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small,"Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending,"Simultaneous requests to server. Default: 50 Range: 2 to 256");

/* signalled by the oplock thread on exit; module unload waits on it */
static DECLARE_COMPLETION(cifs_oplock_exited);

/* mempools defined later in this file; extern'd for other translation units */
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern kmem_cache_t *cifs_oplock_cachep;
82 | ||
/*
 * Fill in a cifs superblock at mount time: allocate the per-sb info,
 * connect to the server (cifs_mount), then instantiate the root inode
 * and root dentry.  Returns 0 on success or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
	sb->s_fs_info = kmalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if(cifs_sb == NULL)
		return -ENOMEM;
	else
		memset(cifs_sb,0,sizeof(struct cifs_sb_info));


	/* parse options, resolve server, session setup and tree connect */
	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	    sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);

	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	return 0;

out_no_root:
	/* NOTE(review): this label deliberately falls through to
	   out_mount_failed, which frees cifs_sb -- but cifs_umount() is
	   never called even though cifs_mount() succeeded, so the session/
	   tree connection appear to be leaked on this path; confirm */
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if(cifs_sb) {
		if(cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}
147 | ||
/*
 * Unmount: tear down the tree connection and session via cifs_umount,
 * then release the NLS table and free the per-superblock info.
 */
static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if(cifs_sb == NULL) {
		cFYI(1,("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc) {
		/* nothing useful can be done on failure this late; log it */
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	}
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}
168 | ||
/*
 * statfs(2) for a cifs mount: query the server for size/free-space data.
 * With experimental CIFS Unix extensions compiled in and CAP_UNIX offered
 * by the server, the POSIX QFS call is tried first, falling back to the
 * older QFSInfo transaction on failure.
 */
static int
cifs_statfs(struct super_block *sb, struct kstatfs *buf)
{
	int xid, rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would presumably
			be length of total path, note that some servers may be
			able to support more than this, but best to be safe
			since Win2k and others can not handle very long filenames */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

#ifdef CONFIG_CIFS_EXPERIMENTAL
/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if (pTcon->ses->capabilities & CAP_UNIX)
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed
	   on newer one */
	/* note: the ifdef'd if(rc) guards the statement below it */
	if(rc)
#endif /* CIFS_EXPERIMENTAL */
	rc = CIFSSMBQFSInfo(xid, pTcon, buf);

	/*
	   int f_type;
	   __fsid_t f_fsid;
	   int f_namelen;  */
	/* BB get from info put in tcon struct at mount time with call to QFSAttrInfo */
	FreeXid(xid);
	return 0;	/* always return success? what if volume is no longer available? */
}
211 | ||
212 | static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd) | |
213 | { | |
214 | struct cifs_sb_info *cifs_sb; | |
215 | ||
216 | cifs_sb = CIFS_SB(inode->i_sb); | |
217 | ||
218 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { | |
219 | return 0; | |
220 | } else /* file mode might have been restricted at mount time | |
221 | on the client (above and beyond ACL on servers) for | |
222 | servers which do not support setting and viewing mode bits, | |
223 | so allowing client to check permissions is useful */ | |
224 | return generic_permission(inode, mask, NULL); | |
225 | } | |
226 | ||
/* Slab caches and mempools backing inode, request, and mid allocations;
   non-static ones are shared with other cifs translation units. */
static kmem_cache_t *cifs_inode_cachep;
static kmem_cache_t *cifs_req_cachep;
static kmem_cache_t *cifs_mid_cachep;
kmem_cache_t *cifs_oplock_cachep;
static kmem_cache_t *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
235 | ||
/*
 * Allocate and initialize a cifs inode from the inode slab cache.
 * Client-side caching is disabled until the first open returns oplock
 * state from the server.  Returns NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default; presumably DOS archive bit -- confirm */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;	/* metadata not yet known to be current */
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blksize = CIFS_MAX_MSGSIZE;
	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */

	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}
257 | ||
/* Return a cifs inode to the slab cache (pairs with cifs_alloc_inode). */
static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
263 | ||
264 | /* | |
265 | * cifs_show_options() is for displaying mount options in /proc/mounts. | |
266 | * Not all settable options are displayed but most of the important | |
267 | * ones are. | |
268 | */ | |
269 | static int | |
270 | cifs_show_options(struct seq_file *s, struct vfsmount *m) | |
271 | { | |
272 | struct cifs_sb_info *cifs_sb; | |
273 | ||
274 | cifs_sb = CIFS_SB(m->mnt_sb); | |
275 | ||
276 | if (cifs_sb) { | |
277 | if (cifs_sb->tcon) { | |
278 | seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName); | |
279 | if (cifs_sb->tcon->ses) { | |
280 | if (cifs_sb->tcon->ses->userName) | |
281 | seq_printf(s, ",username=%s", | |
282 | cifs_sb->tcon->ses->userName); | |
283 | if(cifs_sb->tcon->ses->domainName) | |
284 | seq_printf(s, ",domain=%s", | |
285 | cifs_sb->tcon->ses->domainName); | |
286 | } | |
287 | } | |
288 | seq_printf(s, ",rsize=%d",cifs_sb->rsize); | |
289 | seq_printf(s, ",wsize=%d",cifs_sb->wsize); | |
290 | } | |
291 | return 0; | |
292 | } | |
293 | ||
294 | #ifdef CONFIG_CIFS_QUOTA | |
295 | int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid, | |
296 | struct fs_disk_quota * pdquota) | |
297 | { | |
298 | int xid; | |
299 | int rc = 0; | |
300 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | |
301 | struct cifsTconInfo *pTcon; | |
302 | ||
303 | if(cifs_sb) | |
304 | pTcon = cifs_sb->tcon; | |
305 | else | |
306 | return -EIO; | |
307 | ||
308 | ||
309 | xid = GetXid(); | |
310 | if(pTcon) { | |
311 | cFYI(1,("set type: 0x%x id: %d",quota_type,qid)); | |
312 | } else { | |
313 | return -EIO; | |
314 | } | |
315 | ||
316 | FreeXid(xid); | |
317 | return rc; | |
318 | } | |
319 | ||
320 | int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid, | |
321 | struct fs_disk_quota * pdquota) | |
322 | { | |
323 | int xid; | |
324 | int rc = 0; | |
325 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | |
326 | struct cifsTconInfo *pTcon; | |
327 | ||
328 | if(cifs_sb) | |
329 | pTcon = cifs_sb->tcon; | |
330 | else | |
331 | return -EIO; | |
332 | ||
333 | xid = GetXid(); | |
334 | if(pTcon) { | |
335 | cFYI(1,("set type: 0x%x id: %d",quota_type,qid)); | |
336 | } else { | |
337 | rc = -EIO; | |
338 | } | |
339 | ||
340 | FreeXid(xid); | |
341 | return rc; | |
342 | } | |
343 | ||
344 | int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation) | |
345 | { | |
346 | int xid; | |
347 | int rc = 0; | |
348 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | |
349 | struct cifsTconInfo *pTcon; | |
350 | ||
351 | if(cifs_sb) | |
352 | pTcon = cifs_sb->tcon; | |
353 | else | |
354 | return -EIO; | |
355 | ||
356 | xid = GetXid(); | |
357 | if(pTcon) { | |
358 | cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation)); | |
359 | } else { | |
360 | rc = -EIO; | |
361 | } | |
362 | ||
363 | FreeXid(xid); | |
364 | return rc; | |
365 | } | |
366 | ||
367 | int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats) | |
368 | { | |
369 | int xid; | |
370 | int rc = 0; | |
371 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | |
372 | struct cifsTconInfo *pTcon; | |
373 | ||
374 | if(cifs_sb) { | |
375 | pTcon = cifs_sb->tcon; | |
376 | } else { | |
377 | return -EIO; | |
378 | } | |
379 | xid = GetXid(); | |
380 | if(pTcon) { | |
381 | cFYI(1,("pqstats %p",qstats)); | |
382 | } else { | |
383 | rc = -EIO; | |
384 | } | |
385 | ||
386 | FreeXid(xid); | |
387 | return rc; | |
388 | } | |
389 | ||
390 | static struct quotactl_ops cifs_quotactl_ops = { | |
391 | .set_xquota = cifs_xquota_set, | |
392 | .get_xquota = cifs_xquota_set, | |
393 | .set_xstate = cifs_xstate_set, | |
394 | .get_xstate = cifs_xstate_get, | |
395 | }; | |
396 | #endif | |
397 | ||
/*
 * Remount: cifs always keeps directory atime updates disabled; no other
 * options are renegotiated here.
 */
static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;	/* force nodiratime regardless of request */
	return 0;
}
403 | ||
/* Superblock operations wired into sb->s_op by cifs_read_super(). */
struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode	    = generic_delete_inode,
	.delete_inode	= cifs_delete_inode,  *//* Do not need the above two functions
   unless later we add lazy close of inodes or unless the kernel forgets to call
   us with the same number of releases (closes) as opens */
	.show_options = cifs_show_options,
/*	.umount_begin   = cifs_umount_begin, *//* consider adding in the future */
	.remount_fs = cifs_remount,
};
418 | ||
/*
 * get_sb for the cifs filesystem type: always allocates a fresh anonymous
 * superblock (no sharing between mounts) and initializes it via
 * cifs_read_super.  On failure the half-built sb is deactivated and an
 * ERR_PTR is returned.
 */
static struct super_block *
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return sb;

	sb->s_flags = flags;

	/* NOTE(review): MS_VERBOSE is passed into the "silent" parameter of
	   cifs_read_super -- that mapping looks inverted; verify intent */
	rc = cifs_read_super(sb, data, dev_name, flags & MS_VERBOSE ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return ERR_PTR(rc);
	}
	sb->s_flags |= MS_ACTIVE;
	return sb;
}
442 | ||
/*
 * read(2) entry for cached-mode files.  Both branches currently go
 * through the page cache via generic_file_read; the no-read-oplock branch
 * keeps (commented-out) hooks where cache invalidation/revalidation would
 * be done -- see the BB notes.  Returns bytes read or negative errno.
 */
static ssize_t
cifs_read_wrapper(struct file * file, char __user *read_data, size_t read_size,
	  loff_t * poffset)
{
	if(file->f_dentry == NULL)
		return -EIO;
	else if(file->f_dentry->d_inode == NULL)
		return -EIO;

	cFYI(1,("In read_wrapper size %zd at %lld",read_size,*poffset));

	if(CIFS_I(file->f_dentry->d_inode)->clientCanCacheRead) {
		return generic_file_read(file,read_data,read_size,poffset);
	} else {
		/* BB do we need to lock inode from here until after invalidate? */
/*		if(file->f_dentry->d_inode->i_mapping) {
			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
			filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
		}*/
/*		cifs_revalidate(file->f_dentry);*/ /* BB fixme */

		/* BB we should make timer configurable - perhaps
		   by simply calling cifs_revalidate here */
		/* invalidate_remote_inode(file->f_dentry->d_inode);*/
		return generic_file_read(file,read_data,read_size,poffset);
	}
}
470 | ||
/*
 * write(2) entry for cached-mode files: write through the page cache,
 * then, when the server has not granted an exclusive (write) oplock,
 * start writeback immediately so dirty data is not held client-side.
 * Returns bytes written or negative errno.
 */
static ssize_t
cifs_write_wrapper(struct file * file, const char __user *write_data,
	   size_t write_size, loff_t * poffset)
{
	ssize_t written;

	if(file->f_dentry == NULL)
		return -EIO;
	else if(file->f_dentry->d_inode == NULL)
		return -EIO;

	cFYI(1,("In write_wrapper size %zd at %lld",write_size,*poffset));

	written = generic_file_write(file,write_data,write_size,poffset);
	if(!CIFS_I(file->f_dentry->d_inode)->clientCanCacheAll) {
		if(file->f_dentry->d_inode->i_mapping) {
			/* flush dirty pages now; no oplock lets us cache */
			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
		}
	}
	return written;
}
492 | ||
493 | ||
/* Filesystem type registered with the VFS in init_cifs(). */
static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/*  .fs_flags */
};
/* Inode operations for directories. */
struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate:cifs_revalidate,   */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
522 | ||
/* Inode operations for regular files. */
struct inode_operations cifs_file_inode_ops = {
/*	revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
536 | ||
/* Inode operations for symbolic links. */
struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
552 | ||
/* File operations for regular files using cached (oplocked) I/O. */
struct file_operations cifs_file_ops = {
	.read = cifs_read_wrapper,
	.write = cifs_write_wrapper,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
#ifdef CONFIG_CIFS_POSIX
	.ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.readv = generic_file_readv,
	.writev = generic_file_writev,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
575 | ||
/* File operations for direct (uncached) I/O mounts. */
struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
595 | ||
/* File operations for open directories. */
struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
#ifdef CONFIG_CIFS_POSIX
	.ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
};
607 | ||
/*
 * Slab constructor for cifs inodes: initialize the embedded VFS inode and
 * the per-inode byte-range lock list once per slab object (only when the
 * slab layer reports a real construction, not a verify pass).
 */
static void
cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&cifsi->vfs_inode);
		INIT_LIST_HEAD(&cifsi->lockList);
	}
}
619 | ||
/*
 * Create the slab cache for cifs inodes.  Returns 0 on success or -ENOMEM.
 */
static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof (struct cifsInodeInfo),
					      0, SLAB_RECLAIM_ACCOUNT,
					      cifs_init_once, NULL);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}
632 | ||
/* Destroy the inode slab cache; warn if any objects were still in use. */
static void
cifs_destroy_inodecache(void)
{
	if (kmem_cache_destroy(cifs_inode_cachep))
		printk(KERN_WARNING "cifs_inode_cache: error freeing\n");
}
639 | ||
/*
 * Create the large and small SMB request buffer slab caches and their
 * backing mempools, clamping the CIFSMaxBufSize / cifs_min_rcv /
 * cifs_min_small module parameters into their documented ranges first.
 * On any failure everything created so far is torn down; returns 0 or
 * -ENOMEM.
 */
static int
cifs_init_request_bufs(void)
{
	if(CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	/* clamp cifs_min_rcv to [1, 64] */
	if(cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1,("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create(cifs_min_rcv,
					mempool_alloc_slab,
					mempool_free_slab,
					cifs_req_cachep);

	if(cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* 256 (MAX_CIFS_HDR_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_HDR_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	/* clamp cifs_min_small to [2, 256] */
	if(cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1,("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create(cifs_min_small,
				mempool_alloc_slab,
				mempool_free_slab,
				cifs_sm_req_cachep);

	if(cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
713 | ||
/* Tear down the request-buffer mempools and slab caches (reverse of
   cifs_init_request_bufs); warn if any buffers were still allocated. */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	if (kmem_cache_destroy(cifs_req_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_request_cache: error not all structures were freed\n");
	mempool_destroy(cifs_sm_req_poolp);
	if (kmem_cache_destroy(cifs_sm_req_cachep))
		printk(KERN_WARNING
		      "cifs_destroy_request_cache: cifs_small_rq free error\n");
}
726 | ||
/*
 * Create the slab cache and mempool for mid (multiplex id) entries plus
 * the slab cache for queued oplock break entries.  Unwinds partial setup
 * on failure; returns 0 or -ENOMEM.
 */
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
				sizeof (struct mid_q_entry), 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */,
					mempool_alloc_slab,
					mempool_free_slab,
					cifs_mid_cachep);
	if(cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
				sizeof (struct oplock_q_entry), 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_oplock_cachep == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		mempool_destroy(cifs_mid_poolp);
		return -ENOMEM;
	}

	return 0;
}
756 | ||
/* Tear down the mid mempool/cache and oplock cache (reverse of
   cifs_init_mids); warn if any entries were still in use. */
static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	if (kmem_cache_destroy(cifs_mid_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_mids: error not all structures were freed\n");

	if (kmem_cache_destroy(cifs_oplock_cachep))
		printk(KERN_WARNING
		       "error not all oplock structures were freed\n");
}
769 | ||
/*
 * Kernel thread that services queued oplock breaks: for each queued entry
 * it flushes (and, when the read cache is also lost, invalidates) the
 * inode's pages, then acknowledges the break to the server with a
 * LOCKING_ANDX oplock release.  Runs until it receives SIGTERM (sent from
 * exit_cifs), then signals cifs_oplock_exited.
 */
static int cifs_oplock_thread(void * dummyarg)
{
	struct oplock_q_entry * oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode * inode;
	__u16  netfid;
	int rc;

	daemonize("cifsoplockd");
	allow_signal(SIGTERM);

	oplockThread = current;
	do {
		set_current_state(TASK_INTERRUPTIBLE);

		schedule_timeout(1*HZ);
		spin_lock(&GlobalMid_Lock);
		if(list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			/* nothing queued: sleep longer before re-checking */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
				struct oplock_q_entry, qhead);
			if(oplock_item) {
				cFYI(1,("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_sem across
				   the call */
				/* down(&inode->i_sem);*/
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if(CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* up(&inode->i_sem);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1,("Oplock flush inode %p rc %d",inode,rc));

				/* releasing a stale oplock after recent reconnection
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if(pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1,("Oplock release rc = %d ",rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
		}
	} while(!signal_pending(current));
	complete_and_exit (&cifs_oplock_exited, 0);
}
838 | ||
/*
 * Module init: set up /proc entries, global lists, counters and locks,
 * clamp cifs_max_pending into range, create the slab caches/mempools,
 * register the filesystem, and finally spawn the oplock thread.  Each
 * successful step is unwound in reverse order if a later one fails.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
	INIT_LIST_HEAD(&GlobalServerList);	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount,0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	/* clamp module parameter into its documented [2, 256] range */
	if(cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1,("cifs_max_pending set to min of 2"));
	} else if(cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1,("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (!rc) {
		rc = cifs_init_mids();
		if (!rc) {
			rc = cifs_init_request_bufs();
			if (!rc) {
				rc = register_filesystem(&cifs_fs_type);
				if (!rc) {
					/* positive return is the thread's pid */
					rc = (int)kernel_thread(cifs_oplock_thread, NULL,
						CLONE_FS | CLONE_FILES | CLONE_VM);
					if(rc > 0)
						return 0;
					else
						cERROR(1,("error %d create oplock thread",rc));
				}
				cifs_destroy_request_bufs();
			}
			cifs_destroy_mids();
		}
		cifs_destroy_inodecache();
	}
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}
901 | ||
/*
 * Module exit: remove /proc entries, unregister the filesystem, free all
 * caches and pools, then signal the oplock thread and wait for it to die.
 */
static void __exit
exit_cifs(void)
{
	cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	if(oplockThread) {
		send_sig(SIGTERM, oplockThread, 1);
		wait_for_completion(&cifs_oplock_exited);
	}
}
918 | ||
/* Module metadata and init/exit registration. */
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");		/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)