/*
 *   fs/cifs/cifsfs.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2004
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */

#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif

int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = 0;
unsigned int ntlmv2_support = 0;
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct * oplockThread; /* remove sparse warning */
struct task_struct * oplockThread = NULL;
extern struct task_struct * dnotifyThread; /* remove sparse warning */
struct task_struct * dnotifyThread = NULL;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");

static DECLARE_COMPLETION(cifs_oplock_exited);
static DECLARE_COMPLETION(cifs_dnotify_exited);

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern kmem_cache_t *cifs_oplock_cachep;

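/*
 * cifs_read_super() allocates and zeroes the per-superblock cifs_sb_info,
 * calls cifs_mount() to establish the session and tree connection, then
 * instantiates the root inode and dentry; on failure it drops the NLS
 * table and frees cifs_sb.
 */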
static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
	sb->s_fs_info = kmalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if(cifs_sb == NULL)
		return -ENOMEM;
	else
		memset(cifs_sb, 0, sizeof(struct cifs_sb_info));

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	    sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);

	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if(cifs_sb) {
		if(cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}

static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if(cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc) {
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	}
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}

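/*
 * statfs tries the most capable query first and falls back: the POSIX
 * QFS info call (when the server advertises the Unix/POSIX extensions
 * and CONFIG_CIFS_EXPERIMENTAL is set), then the standard QFSInfo level,
 * and finally the old level-one call for older servers.
 */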
static int
cifs_statfs(struct super_block *sb, struct kstatfs *buf)
{
	int xid;
	int rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
				      presumably be total path, but note
				      that some servers (including Samba 3)
				      have a shorter maximum path */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

#ifdef CONFIG_CIFS_EXPERIMENTAL
/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
	    le64_to_cpu(pTcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed
	   on newer one */
	if(rc)
#endif /* CIFS_EXPERIMENTAL */
		rc = CIFSSMBQFSInfo(xid, pTcon, buf);

	/* Old Windows servers do not support level 103, retry with level
	   one if old server failed the previous call */
	if(rc)
		rc = SMBOldQFSInfo(xid, pTcon, buf);
	/*
	   int f_type;
	   __fsid_t f_fsid;
	   int f_namelen; */
	/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
	FreeXid(xid);
	return 0;	/* always return success? what if volume is no
			   longer available? */
}

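/*
 * With CIFS_MOUNT_NO_PERM (the "noperm" mount option) the client skips
 * its own permission checks and defers entirely to the server;
 * otherwise it falls back to the generic mode-bit check, which matters
 * when the mount restricted the mode beyond what the server enforces.
 */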
static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}

static kmem_cache_t *cifs_inode_cachep;
static kmem_cache_t *cifs_req_cachep;
static kmem_cache_t *cifs_mid_cachep;
kmem_cache_t *cifs_oplock_cachep;
static kmem_cache_t *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blksize = CIFS_MAX_MSGSIZE;
	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
						   cifs_sb->tcon->ses->userName);
				if(cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
						   cifs_sb->tcon->ses->domainName);
			}
		}
		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}

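/*
 * The CONFIG_CIFS_QUOTA handlers below are placeholders: they validate
 * the superblock and tree connection and log the request, but no quota
 * SMBs are sent yet, so they simply return success (or -EIO when the
 * tcon is missing).
 */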
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
		struct fs_disk_quota * pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if(cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if(pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else {
		return -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
		struct fs_disk_quota * pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if(cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if(pTcon) {
		cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if(cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if(pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if(cifs_sb) {
		pTcon = cifs_sb->tcon;
	} else {
		return -EIO;
	}
	xid = GetXid();
	if(pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota	= cifs_xquota_set,
	.get_xquota	= cifs_xquota_get,
	.set_xstate	= cifs_xstate_set,
	.get_xstate	= cifs_xstate_get,
};
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode	= generic_delete_inode,
	.delete_inode	= cifs_delete_inode, */	/* Do not need the above two functions
	unless later we add lazy close of inodes or unless the kernel forgets to call
	us with the same number of releases (closes) as opens */
	.show_options = cifs_show_options,
/*	.umount_begin = cifs_umount_begin, */ /* consider adding in the future */
	.remount_fs = cifs_remount,
};

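/*
 * cifs_get_sb() always allocates a fresh anonymous superblock (sget()
 * with a NULL test function), so each mount gets its own superblock
 * even for the same UNC path; cifs_read_super() then does the actual
 * mount work.
 */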
static struct super_block *
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return sb;

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_VERBOSE ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return ERR_PTR(rc);
	}
	sb->s_flags |= MS_ACTIVE;
	return sb;
}

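/*
 * The read and write wrappers route regular file I/O through the
 * generic page cache paths. The write side starts writeback right away
 * when no full oplock is held; the read side is intended to revalidate
 * the cache when no read oplock is held, but that step is still
 * commented out (see the BB notes below).
 */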
static ssize_t
cifs_read_wrapper(struct file * file, char __user *read_data, size_t read_size,
	  loff_t * poffset)
{
	if(file->f_dentry == NULL)
		return -EIO;
	else if(file->f_dentry->d_inode == NULL)
		return -EIO;

	cFYI(1, ("In read_wrapper size %zd at %lld", read_size, *poffset));

	if(CIFS_I(file->f_dentry->d_inode)->clientCanCacheRead) {
		return generic_file_read(file, read_data, read_size, poffset);
	} else {
		/* BB do we need to lock inode from here until after invalidate? */
/*		if(file->f_dentry->d_inode->i_mapping) {
			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
			filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
		}*/
/*		cifs_revalidate(file->f_dentry);*/ /* BB fixme */

		/* BB we should make timer configurable - perhaps
		   by simply calling cifs_revalidate here */
		/* invalidate_remote_inode(file->f_dentry->d_inode);*/
		return generic_file_read(file, read_data, read_size, poffset);
	}
}

static ssize_t
cifs_write_wrapper(struct file * file, const char __user *write_data,
	   size_t write_size, loff_t * poffset)
{
	ssize_t written;

	if(file->f_dentry == NULL)
		return -EIO;
	else if(file->f_dentry->d_inode == NULL)
		return -EIO;

	cFYI(1, ("In write_wrapper size %zd at %lld", write_size, *poffset));

	written = generic_file_write(file, write_data, write_size, poffset);
	if(!CIFS_I(file->f_dentry->d_inode)->clientCanCacheAll) {
		if(file->f_dentry->d_inode->i_mapping) {
			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
		}
	}
	return written;
}

static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/*  .fs_flags */
};
struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

struct inode_operations cifs_file_inode_ops = {
/*	revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

struct file_operations cifs_file_ops = {
	.read = cifs_read_wrapper,
	.write = cifs_write_wrapper,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.readv = generic_file_readv,
	.writev = generic_file_writev,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.ioctl = cifs_ioctl,
};

static void
cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&cifsi->vfs_inode);
		INIT_LIST_HEAD(&cifsi->lockList);
	}
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof (struct cifsInodeInfo),
					      0, SLAB_RECLAIM_ACCOUNT,
					      cifs_init_once, NULL);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	if (kmem_cache_destroy(cifs_inode_cachep))
		printk(KERN_WARNING "cifs_inode_cache: error freeing\n");
}

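/*
 * Two request buffer pools are kept: full-size buffers (CIFSMaxBufSize +
 * MAX_CIFS_HDR_SIZE) for large SMBs such as writes and path based
 * requests, and small 256 byte buffers that cover most other requests
 * and responses. CIFSMaxBufSize is clamped to the 8192..130048 range
 * given in the module parameter description and rounded down to a
 * multiple of 512.
 */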
static int
cifs_init_request_bufs(void)
{
	if(CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if(cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create(cifs_min_rcv,
					mempool_alloc_slab,
					mempool_free_slab,
					cifs_req_cachep);

	if(cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* 256 (MAX_CIFS_HDR_SIZE) bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_HDR_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if(cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create(cifs_min_small,
					   mempool_alloc_slab,
					   mempool_free_slab,
					   cifs_sm_req_cachep);

	if(cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	if (kmem_cache_destroy(cifs_req_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_request_cache: error not all structures were freed\n");
	mempool_destroy(cifs_sm_req_poolp);
	if (kmem_cache_destroy(cifs_sm_req_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_request_cache: cifs_small_rq free error\n");
}

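/*
 * Multiplex ids (mids) track outstanding SMB requests; a small mempool
 * guarantees forward progress under memory pressure, and a separate
 * slab holds the pending oplock break entries consumed by the oplock
 * thread.
 */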
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof (struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */,
					mempool_alloc_slab,
					mempool_free_slab,
					cifs_mid_cachep);
	if(cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof (struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_oplock_cachep == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		mempool_destroy(cifs_mid_poolp);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	if (kmem_cache_destroy(cifs_mid_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_mids: error not all structures were freed\n");

	if (kmem_cache_destroy(cifs_oplock_cachep))
		printk(KERN_WARNING
		       "error not all oplock structures were freed\n");
}

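/*
 * The oplock daemon wakes up periodically, pulls queued oplock break
 * entries off GlobalOplock_Q, flushes (and if necessary invalidates)
 * the affected inode's page cache, then acknowledges the break to the
 * server with LOCKING_ANDX_OPLOCK_RELEASE unless the tree connection
 * still needs to be reconnected.
 */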
static int cifs_oplock_thread(void * dummyarg)
{
	struct oplock_q_entry * oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode * inode;
	__u16 netfid;
	int rc;

	daemonize("cifsoplockd");
	allow_signal(SIGTERM);

	oplockThread = current;
	do {
		if(try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);

		schedule_timeout(1*HZ);
		spin_lock(&GlobalMid_Lock);
		if(list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			if(oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_sem across
				   the call */
				/* down(&inode->i_sem);*/
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if(CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* up(&inode->i_sem);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

				/* releasing a stale oplock after recent reconnection
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if(pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1, ("Oplock release rc = %d ", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
		}
	} while(!signal_pending(current));
	oplockThread = NULL;
	complete_and_exit (&cifs_oplock_exited, 0);
}

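/*
 * The dnotify daemon is currently a placeholder: it just sleeps in
 * 39-second intervals until it is signalled at module unload (the
 * .dir_notify entries above hook up the actual notify requests under
 * CONFIG_CIFS_EXPERIMENTAL).
 */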
static int cifs_dnotify_thread(void * dummyarg)
{
	daemonize("cifsdnotifyd");
	allow_signal(SIGTERM);

	dnotifyThread = current;
	do {
		if(try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(39*HZ);
	} while(!signal_pending(current));
	complete_and_exit (&cifs_dnotify_exited, 0);
}

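/*
 * Module init: set up the /proc entries, global lists, counters and
 * locks, clamp cifs_max_pending to the 2..256 range, then bring up the
 * inode cache, mid structures, request buffers, filesystem registration
 * and the two kernel threads, unwinding each step if a later one fails.
 */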
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
	INIT_LIST_HEAD(&GlobalServerList);	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if(cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if(cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (!rc) {
		rc = cifs_init_mids();
		if (!rc) {
			rc = cifs_init_request_bufs();
			if (!rc) {
				rc = register_filesystem(&cifs_fs_type);
				if (!rc) {
					rc = (int)kernel_thread(cifs_oplock_thread, NULL,
						CLONE_FS | CLONE_FILES | CLONE_VM);
					if(rc > 0) {
						rc = (int)kernel_thread(cifs_dnotify_thread, NULL,
							CLONE_FS | CLONE_FILES | CLONE_VM);
						if(rc > 0)
							return 0;
						else
							cERROR(1, ("error %d create dnotify thread", rc));
					} else {
						cERROR(1, ("error %d create oplock thread", rc));
					}
				}
				cifs_destroy_request_bufs();
			}
			cifs_destroy_mids();
		}
		cifs_destroy_inodecache();
	}
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}

static void __exit
exit_cifs(void)
{
	cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	if(oplockThread) {
		send_sig(SIGTERM, oplockThread, 1);
		wait_for_completion(&cifs_oplock_exited);
	}
	if(dnotifyThread) {
		send_sig(SIGTERM, dnotifyThread, 1);
		wait_for_completion(&cifs_dnotify_exited);
	}
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)