2#include "ceph_debug.h"
3
4#include <linux/backing-dev.h>
c309f0ab 5#include <linux/ctype.h>
16725b9d
SW
6#include <linux/fs.h>
7#include <linux/inet.h>
8#include <linux/in6.h>
9#include <linux/module.h>
10#include <linux/mount.h>
11#include <linux/parser.h>
16725b9d
SW
12#include <linux/sched.h>
13#include <linux/seq_file.h>
5a0e3ad6 14#include <linux/slab.h>
16725b9d
SW
15#include <linux/statfs.h>
16#include <linux/string.h>
16725b9d 17
16725b9d
SW
18#include "decode.h"
19#include "super.h"
20#include "mon_client.h"
0743304d 21#include "auth.h"
16725b9d
SW
22
23/*
24 * Ceph superblock operations
25 *
26 * Handle the basics of mounting, unmounting.
27 */
28
29
30/*
31 * find filename portion of a path (/foo/bar/baz -> baz)
32 */
33const char *ceph_file_part(const char *s, int len)
34{
35 const char *e = s + len;
36
37 while (e != s && *(e-1) != '/')
38 e--;
39 return e;
40}
41

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
        struct ceph_client *client = ceph_sb_to_client(s);

        dout("put_super\n");
        ceph_mdsc_close_sessions(&client->mdsc);

        /*
         * ensure we release the bdi before put_anon_super releases
         * the device name.
         */
        if (s->s_bdi == &client->backing_dev_info) {
                bdi_unregister(&client->backing_dev_info);
                s->s_bdi = NULL;
        }

        return;
}

static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct ceph_client *client = ceph_inode_to_client(dentry->d_inode);
        struct ceph_monmap *monmap = client->monc.monmap;
        struct ceph_statfs st;
        u64 fsid;
        int err;

        dout("statfs\n");
        err = ceph_monc_do_statfs(&client->monc, &st);
        if (err < 0)
                return err;

        /* fill in kstatfs */
        buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */

        /*
         * express utilization in terms of large blocks to avoid
         * overflow on 32-bit machines.
         */
        buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
        buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
        buf->f_bfree = (le64_to_cpu(st.kb) - le64_to_cpu(st.kb_used)) >>
                (CEPH_BLOCK_SHIFT-10);
        buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);

        buf->f_files = le64_to_cpu(st.num_objects);
        buf->f_ffree = -1;
        buf->f_namelen = NAME_MAX;
        buf->f_frsize = PAGE_CACHE_SIZE;

        /* leave fsid little-endian, regardless of host endianness */
        fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
        buf->f_fsid.val[0] = fsid & 0xffffffff;
        buf->f_fsid.val[1] = fsid >> 32;

        return 0;
}
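
/*
 * Illustrative note on the f_blocks scaling above (added commentary, not
 * part of the original source): st.kb is reported in kilobytes, so
 * shifting right by (CEPH_BLOCK_SHIFT - 10) converts it to
 * CEPH_BLOCK-sized units.  For example, assuming CEPH_BLOCK_SHIFT were
 * 22 (4 MB blocks), a 10 TB pool (~10737418240 KB, which overflows 32
 * bits) becomes 2621440 blocks, which fits in the 32-bit statfs fields
 * used by older userspace.
 */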


static int ceph_sync_fs(struct super_block *sb, int wait)
{
        struct ceph_client *client = ceph_sb_to_client(sb);

        if (!wait) {
                dout("sync_fs (non-blocking)\n");
                ceph_flush_dirty_caps(&client->mdsc);
                dout("sync_fs (non-blocking) done\n");
                return 0;
        }

        dout("sync_fs (blocking)\n");
        ceph_osdc_sync(&ceph_sb_to_client(sb)->osdc);
        ceph_mdsc_sync(&ceph_sb_to_client(sb)->mdsc);
        dout("sync_fs (blocking) done\n");
        return 0;
}
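
/*
 * Note (added commentary, not from the original source): the VFS calls
 * ->sync_fs() with wait == 0 for an opportunistic, non-blocking flush,
 * and with wait == 1 when the caller (sync(2), umount, etc.) needs the
 * data stable before returning; the two branches above implement
 * exactly that split.
 */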

static int default_congestion_kb(void)
{
        int congestion_kb;

        /*
         * Copied from NFS
         *
         * congestion size, scale with available memory.
         *
         *  64MB:    8192k
         * 128MB:   11585k
         * 256MB:   16384k
         * 512MB:   23170k
         *   1GB:   32768k
         *   2GB:   46340k
         *   4GB:   65536k
         *   8GB:   92681k
         *  16GB:  131072k
         *
         * This allows larger machines to have larger/more transfers.
         * Limit the default to 256M
         */
        congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
        if (congestion_kb > 256*1024)
                congestion_kb = 256*1024;

        return congestion_kb;
}
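
/*
 * Worked example for the formula above (added commentary, not from the
 * original source): on a machine with 1 GB of RAM and 4 KB pages,
 * totalram_pages is roughly 262144, int_sqrt(262144) == 512, so
 * congestion_kb = (16 * 512) << (12 - 10) = 32768k, matching the 1GB
 * row of the table.
 */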

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @mnt: mount descriptor
 */
static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
{
        struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb);
        struct ceph_mount_args *args = client->mount_args;

        if (args->flags & CEPH_OPT_FSID)
                seq_printf(m, ",fsid=%pU", &args->fsid);
        if (args->flags & CEPH_OPT_NOSHARE)
                seq_puts(m, ",noshare");
        if (args->flags & CEPH_OPT_DIRSTAT)
                seq_puts(m, ",dirstat");
        if ((args->flags & CEPH_OPT_RBYTES) == 0)
                seq_puts(m, ",norbytes");
        if (args->flags & CEPH_OPT_NOCRC)
                seq_puts(m, ",nocrc");
        if (args->flags & CEPH_OPT_NOASYNCREADDIR)
                seq_puts(m, ",noasyncreaddir");

        if (args->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
                seq_printf(m, ",mount_timeout=%d", args->mount_timeout);
        if (args->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
                seq_printf(m, ",osd_idle_ttl=%d", args->osd_idle_ttl);
        if (args->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
                seq_printf(m, ",osdtimeout=%d", args->osd_timeout);
        if (args->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
                seq_printf(m, ",osdkeepalivetimeout=%d",
                           args->osd_keepalive_timeout);
        if (args->wsize)
                seq_printf(m, ",wsize=%d", args->wsize);
        if (args->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
                seq_printf(m, ",rsize=%d", args->rsize);
        if (args->congestion_kb != default_congestion_kb())
                seq_printf(m, ",write_congestion_kb=%d", args->congestion_kb);
        if (args->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
                seq_printf(m, ",caps_wanted_delay_min=%d",
                           args->caps_wanted_delay_min);
        if (args->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
                seq_printf(m, ",caps_wanted_delay_max=%d",
                           args->caps_wanted_delay_max);
        if (args->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
                seq_printf(m, ",cap_release_safety=%d",
                           args->cap_release_safety);
        if (args->max_readdir != CEPH_MAX_READDIR_DEFAULT)
                seq_printf(m, ",readdir_max_entries=%d", args->max_readdir);
        if (args->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
                seq_printf(m, ",readdir_max_bytes=%d", args->max_readdir_bytes);
        if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
                seq_printf(m, ",snapdirname=%s", args->snapdir_name);
        if (args->name)
                seq_printf(m, ",name=%s", args->name);
        if (args->secret)
                seq_puts(m, ",secret=<hidden>");
        return 0;
}
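
/*
 * Illustrative example (added commentary, not from the original source):
 * for a mount with only a non-default rsize and a named user, the extra
 * options shown in /proc/mounts might look like
 * ",rsize=131072,name=admin,secret=<hidden>"; options left at their
 * defaults are deliberately omitted by the checks above.
 */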

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;

static void ceph_inode_init_once(void *foo)
{
        struct ceph_inode_info *ci = foo;
        inode_init_once(&ci->vfs_inode);
}

static int __init init_caches(void)
{
        ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
                                      sizeof(struct ceph_inode_info),
                                      __alignof__(struct ceph_inode_info),
                                      (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
                                      ceph_inode_init_once);
        if (ceph_inode_cachep == NULL)
                return -ENOMEM;

        ceph_cap_cachep = KMEM_CACHE(ceph_cap,
                                     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_cap_cachep == NULL)
                goto bad_cap;

        ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
                                        SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_dentry_cachep == NULL)
                goto bad_dentry;

        ceph_file_cachep = KMEM_CACHE(ceph_file_info,
                                      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_file_cachep == NULL)
                goto bad_file;

        return 0;

bad_file:
        kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
        kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
        kmem_cache_destroy(ceph_inode_cachep);
        return -ENOMEM;
}

static void destroy_caches(void)
{
        kmem_cache_destroy(ceph_inode_cachep);
        kmem_cache_destroy(ceph_cap_cachep);
        kmem_cache_destroy(ceph_dentry_cachep);
        kmem_cache_destroy(ceph_file_cachep);
}


/*
 * ceph_umount_begin - initiate forced umount.  Tear down the mount,
 * skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
        struct ceph_client *client = ceph_sb_to_client(sb);

        dout("ceph_umount_begin - starting forced umount\n");
        if (!client)
                return;
        client->mount_state = CEPH_MOUNT_SHUTDOWN;
        return;
}

static const struct super_operations ceph_super_ops = {
        .alloc_inode    = ceph_alloc_inode,
        .destroy_inode  = ceph_destroy_inode,
        .write_inode    = ceph_write_inode,
        .sync_fs        = ceph_sync_fs,
        .put_super      = ceph_put_super,
        .show_options   = ceph_show_options,
        .statfs         = ceph_statfs,
        .umount_begin   = ceph_umount_begin,
};


const char *ceph_msg_type_name(int type)
{
        switch (type) {
        case CEPH_MSG_SHUTDOWN: return "shutdown";
        case CEPH_MSG_PING: return "ping";
        case CEPH_MSG_AUTH: return "auth";
        case CEPH_MSG_AUTH_REPLY: return "auth_reply";
        case CEPH_MSG_MON_MAP: return "mon_map";
        case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
        case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
        case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
        case CEPH_MSG_STATFS: return "statfs";
        case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
        case CEPH_MSG_MDS_MAP: return "mds_map";
        case CEPH_MSG_CLIENT_SESSION: return "client_session";
        case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
        case CEPH_MSG_CLIENT_REQUEST: return "client_request";
        case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
        case CEPH_MSG_CLIENT_REPLY: return "client_reply";
        case CEPH_MSG_CLIENT_CAPS: return "client_caps";
        case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
        case CEPH_MSG_CLIENT_SNAP: return "client_snap";
        case CEPH_MSG_CLIENT_LEASE: return "client_lease";
        case CEPH_MSG_OSD_MAP: return "osd_map";
        case CEPH_MSG_OSD_OP: return "osd_op";
        case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
        default: return "unknown";
        }
}


/*
 * mount options
 */
enum {
        Opt_wsize,
        Opt_rsize,
        Opt_osdtimeout,
        Opt_osdkeepalivetimeout,
        Opt_mount_timeout,
        Opt_osd_idle_ttl,
        Opt_caps_wanted_delay_min,
        Opt_caps_wanted_delay_max,
        Opt_cap_release_safety,
        Opt_readdir_max_entries,
        Opt_readdir_max_bytes,
        Opt_congestion_kb,
        Opt_last_int,
        /* int args above */
        Opt_fsid,
        Opt_snapdirname,
        Opt_name,
        Opt_secret,
        Opt_last_string,
        /* string args above */
        Opt_ip,
        Opt_noshare,
        Opt_dirstat,
        Opt_nodirstat,
        Opt_rbytes,
        Opt_norbytes,
        Opt_nocrc,
        Opt_noasyncreaddir,
};

static match_table_t arg_tokens = {
        {Opt_wsize, "wsize=%d"},
        {Opt_rsize, "rsize=%d"},
        {Opt_osdtimeout, "osdtimeout=%d"},
        {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
        {Opt_mount_timeout, "mount_timeout=%d"},
        {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
        {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
        {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
        {Opt_cap_release_safety, "cap_release_safety=%d"},
        {Opt_readdir_max_entries, "readdir_max_entries=%d"},
        {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
        {Opt_congestion_kb, "write_congestion_kb=%d"},
        /* int args above */
        {Opt_fsid, "fsid=%s"},
        {Opt_snapdirname, "snapdirname=%s"},
        {Opt_name, "name=%s"},
        {Opt_secret, "secret=%s"},
        /* string args above */
        {Opt_ip, "ip=%s"},
        {Opt_noshare, "noshare"},
        {Opt_dirstat, "dirstat"},
        {Opt_nodirstat, "nodirstat"},
        {Opt_rbytes, "rbytes"},
        {Opt_norbytes, "norbytes"},
        {Opt_nocrc, "nocrc"},
        {Opt_noasyncreaddir, "noasyncreaddir"},
        {-1, NULL}
};

static int parse_fsid(const char *str, struct ceph_fsid *fsid)
{
        int i = 0;
        char tmp[3];
        int err = -EINVAL;
        int d;

        dout("parse_fsid '%s'\n", str);
        tmp[2] = 0;
        while (*str && i < 16) {
                if (ispunct(*str)) {
                        str++;
                        continue;
                }
                if (!isxdigit(str[0]) || !isxdigit(str[1]))
                        break;
                tmp[0] = str[0];
                tmp[1] = str[1];
                if (sscanf(tmp, "%x", &d) < 1)
                        break;
                fsid->fsid[i] = d & 0xff;
                i++;
                str += 2;
        }

        if (i == 16)
                err = 0;
        dout("parse_fsid ret %d got fsid %pU", err, fsid);
        return err;
}
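
/*
 * Illustrative note (added commentary, not from the original source):
 * the loop above consumes exactly 16 hex byte pairs and skips any
 * punctuation between them, so both a bare 32-digit hex string and the
 * usual UUID spelling, e.g. fsid=3f2a1b4c-0d5e-6f70-8192-a3b4c5d6e7f8,
 * parse to the same 16-byte fsid.  (The example value is made up.)
 */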

static struct ceph_mount_args *parse_mount_args(int flags, char *options,
                                                const char *dev_name,
                                                const char **path)
{
        struct ceph_mount_args *args;
        const char *c;
        int err = -ENOMEM;
        substring_t argstr[MAX_OPT_ARGS];

        args = kzalloc(sizeof(*args), GFP_KERNEL);
        if (!args)
                return ERR_PTR(-ENOMEM);
        args->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*args->mon_addr),
                                 GFP_KERNEL);
        if (!args->mon_addr)
                goto out;

        dout("parse_mount_args %p, dev_name '%s'\n", args, dev_name);

        /* start with defaults */
        args->sb_flags = flags;
        args->flags = CEPH_OPT_DEFAULT;
        args->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
        args->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
        args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
        args->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
        args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
        args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
        args->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
        args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
        args->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
        args->max_readdir = CEPH_MAX_READDIR_DEFAULT;
        args->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
        args->congestion_kb = default_congestion_kb();

        /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
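        /*
         * Illustrative example (added commentary, not from the original
         * source): a dev_name such as "192.168.0.1:6789,192.168.0.2:/export"
         * is split at the first ":/" below into the monitor address list
         * ("192.168.0.1:6789,192.168.0.2") and the path on the server
         * that follows it.  (Addresses are made up.)
         */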
        err = -EINVAL;
        if (!dev_name)
                goto out;
        *path = strstr(dev_name, ":/");
        if (*path == NULL) {
                pr_err("device name is missing path (no :/ in %s)\n",
                       dev_name);
                goto out;
        }

        /* get mon ip(s) */
        err = ceph_parse_ips(dev_name, *path, args->mon_addr,
                             CEPH_MAX_MON, &args->num_mon);
        if (err < 0)
                goto out;

        /* path on server */
        *path += 2;
        dout("server path '%s'\n", *path);

        /* parse mount options */
        while ((c = strsep(&options, ",")) != NULL) {
                int token, intval, ret;
                if (!*c)
                        continue;
                err = -EINVAL;
                token = match_token((char *)c, arg_tokens, argstr);
                if (token < 0) {
                        pr_err("bad mount option at '%s'\n", c);
                        goto out;
                }
                if (token < Opt_last_int) {
                        ret = match_int(&argstr[0], &intval);
                        if (ret < 0) {
                                pr_err("bad mount option arg (not int) "
                                       "at '%s'\n", c);
                                continue;
                        }
                        dout("got int token %d val %d\n", token, intval);
                } else if (token > Opt_last_int && token < Opt_last_string) {
                        dout("got string token %d val %s\n", token,
                             argstr[0].from);
                } else {
                        dout("got token %d\n", token);
                }
                switch (token) {
                case Opt_ip:
                        err = ceph_parse_ips(argstr[0].from,
                                             argstr[0].to,
                                             &args->my_addr,
                                             1, NULL);
                        if (err < 0)
                                goto out;
                        args->flags |= CEPH_OPT_MYIP;
                        break;

                case Opt_fsid:
                        err = parse_fsid(argstr[0].from, &args->fsid);
                        if (err == 0)
                                args->flags |= CEPH_OPT_FSID;
                        break;
                case Opt_snapdirname:
                        kfree(args->snapdir_name);
                        args->snapdir_name = kstrndup(argstr[0].from,
                                              argstr[0].to-argstr[0].from,
                                              GFP_KERNEL);
                        break;
                case Opt_name:
                        args->name = kstrndup(argstr[0].from,
                                              argstr[0].to-argstr[0].from,
                                              GFP_KERNEL);
                        break;
                case Opt_secret:
                        args->secret = kstrndup(argstr[0].from,
                                                argstr[0].to-argstr[0].from,
                                                GFP_KERNEL);
                        break;

                        /* misc */
                case Opt_wsize:
                        args->wsize = intval;
                        break;
                case Opt_rsize:
                        args->rsize = intval;
                        break;
                case Opt_osdtimeout:
                        args->osd_timeout = intval;
                        break;
                case Opt_osdkeepalivetimeout:
                        args->osd_keepalive_timeout = intval;
                        break;
                case Opt_osd_idle_ttl:
                        args->osd_idle_ttl = intval;
                        break;
                case Opt_mount_timeout:
                        args->mount_timeout = intval;
                        break;
                case Opt_caps_wanted_delay_min:
                        args->caps_wanted_delay_min = intval;
                        break;
                case Opt_caps_wanted_delay_max:
                        args->caps_wanted_delay_max = intval;
                        break;
                case Opt_readdir_max_entries:
                        args->max_readdir = intval;
                        break;
                case Opt_readdir_max_bytes:
                        args->max_readdir_bytes = intval;
                        break;
                case Opt_congestion_kb:
                        args->congestion_kb = intval;
                        break;

                case Opt_noshare:
                        args->flags |= CEPH_OPT_NOSHARE;
                        break;

                case Opt_dirstat:
                        args->flags |= CEPH_OPT_DIRSTAT;
                        break;
                case Opt_nodirstat:
                        args->flags &= ~CEPH_OPT_DIRSTAT;
                        break;
                case Opt_rbytes:
                        args->flags |= CEPH_OPT_RBYTES;
                        break;
                case Opt_norbytes:
                        args->flags &= ~CEPH_OPT_RBYTES;
                        break;
                case Opt_nocrc:
                        args->flags |= CEPH_OPT_NOCRC;
                        break;
                case Opt_noasyncreaddir:
                        args->flags |= CEPH_OPT_NOASYNCREADDIR;
                        break;

                default:
                        BUG_ON(token);
                }
        }
        return args;

out:
        kfree(args->mon_addr);
        kfree(args);
        return ERR_PTR(err);
}

static void destroy_mount_args(struct ceph_mount_args *args)
{
        dout("destroy_mount_args %p\n", args);
        kfree(args->snapdir_name);
        args->snapdir_name = NULL;
        kfree(args->name);
        args->name = NULL;
        kfree(args->secret);
        args->secret = NULL;
        kfree(args);
}

/*
 * create a fresh client instance
 */
static struct ceph_client *ceph_create_client(struct ceph_mount_args *args)
{
        struct ceph_client *client;
        int err = -ENOMEM;

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL)
                return ERR_PTR(-ENOMEM);

        mutex_init(&client->mount_mutex);

        init_waitqueue_head(&client->auth_wq);

        client->sb = NULL;
        client->mount_state = CEPH_MOUNT_MOUNTING;
        client->mount_args = args;

        client->msgr = NULL;

        client->auth_err = 0;
        atomic_long_set(&client->writeback_count, 0);

        err = bdi_init(&client->backing_dev_info);
        if (err < 0)
                goto fail;

        err = -ENOMEM;
        client->wb_wq = create_workqueue("ceph-writeback");
        if (client->wb_wq == NULL)
                goto fail_bdi;
        client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
        if (client->pg_inv_wq == NULL)
                goto fail_wb_wq;
        client->trunc_wq = create_singlethread_workqueue("ceph-trunc");
        if (client->trunc_wq == NULL)
                goto fail_pg_inv_wq;

        /* set up mempools */
        err = -ENOMEM;
        client->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
                              client->mount_args->wsize >> PAGE_CACHE_SHIFT);
        if (!client->wb_pagevec_pool)
                goto fail_trunc_wq;

        /* caps */
        client->min_caps = args->max_readdir;

        /* subsystems */
        err = ceph_monc_init(&client->monc, client);
        if (err < 0)
                goto fail_mempool;
        err = ceph_osdc_init(&client->osdc, client);
        if (err < 0)
                goto fail_monc;
        err = ceph_mdsc_init(&client->mdsc, client);
        if (err < 0)
                goto fail_osdc;
        return client;

fail_osdc:
        ceph_osdc_stop(&client->osdc);
fail_monc:
        ceph_monc_stop(&client->monc);
fail_mempool:
        mempool_destroy(client->wb_pagevec_pool);
fail_trunc_wq:
        destroy_workqueue(client->trunc_wq);
fail_pg_inv_wq:
        destroy_workqueue(client->pg_inv_wq);
fail_wb_wq:
        destroy_workqueue(client->wb_wq);
fail_bdi:
        bdi_destroy(&client->backing_dev_info);
fail:
        kfree(client);
        return ERR_PTR(err);
}

static void ceph_destroy_client(struct ceph_client *client)
{
        dout("destroy_client %p\n", client);

        /* unmount */
        ceph_mdsc_stop(&client->mdsc);
        ceph_osdc_stop(&client->osdc);

        /*
         * make sure mds and osd connections close out before destroying
         * the auth module, which is needed to free those connections'
         * ceph_authorizers.
         */
        ceph_msgr_flush();

        ceph_monc_stop(&client->monc);

        ceph_debugfs_client_cleanup(client);
        destroy_workqueue(client->wb_wq);
        destroy_workqueue(client->pg_inv_wq);
        destroy_workqueue(client->trunc_wq);

        bdi_destroy(&client->backing_dev_info);

        if (client->msgr)
                ceph_messenger_destroy(client->msgr);
        mempool_destroy(client->wb_pagevec_pool);

        destroy_mount_args(client->mount_args);

        kfree(client);
        dout("destroy_client %p done\n", client);
}

/*
 * Initially learn our fsid, or verify an fsid matches.
 */
int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
{
        if (client->have_fsid) {
                if (ceph_fsid_compare(&client->fsid, fsid)) {
                        pr_err("bad fsid, had %pU got %pU",
                               &client->fsid, fsid);
                        return -1;
                }
        } else {
                pr_info("client%lld fsid %pU\n", client->monc.auth->global_id,
                        fsid);
                memcpy(&client->fsid, fsid, sizeof(*fsid));
                ceph_debugfs_client_init(client);
                client->have_fsid = true;
        }
        return 0;
}

/*
 * true if we have both the mon and osd maps (and have thus joined the
 * cluster)
 */
static int have_mon_and_osd_map(struct ceph_client *client)
{
        return client->monc.monmap && client->monc.monmap->epoch &&
               client->osdc.osdmap && client->osdc.osdmap->epoch;
}

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_client *client,
                                       const char *path,
                                       unsigned long started)
{
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req = NULL;
        int err;
        struct dentry *root;

        /* open dir */
        dout("open_root_inode opening '%s'\n", path);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_path1 = kstrdup(path, GFP_NOFS);
        req->r_ino1.ino = CEPH_INO_ROOT;
        req->r_ino1.snap = CEPH_NOSNAP;
        req->r_started = started;
        req->r_timeout = client->mount_args->mount_timeout * HZ;
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_num_caps = 2;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (err == 0) {
                dout("open_root_inode success\n");
                if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
                    client->sb->s_root == NULL)
                        root = d_alloc_root(req->r_target_inode);
                else
                        root = d_obtain_alias(req->r_target_inode);
                req->r_target_inode = NULL;
                dout("open_root_inode success, root dentry is %p\n", root);
        } else {
                root = ERR_PTR(err);
        }
        ceph_mdsc_put_request(req);
        return root;
}

/*
 * mount: join the ceph cluster, and open root directory.
 */
static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt,
                      const char *path)
{
        struct ceph_entity_addr *myaddr = NULL;
        int err;
        unsigned long timeout = client->mount_args->mount_timeout * HZ;
        unsigned long started = jiffies;  /* note the start time */
        struct dentry *root;

        dout("mount start\n");
        mutex_lock(&client->mount_mutex);

        /* initialize the messenger */
        if (client->msgr == NULL) {
                if (ceph_test_opt(client, MYIP))
                        myaddr = &client->mount_args->my_addr;
                client->msgr = ceph_messenger_create(myaddr);
                if (IS_ERR(client->msgr)) {
                        err = PTR_ERR(client->msgr);
                        client->msgr = NULL;
                        goto out;
                }
                client->msgr->nocrc = ceph_test_opt(client, NOCRC);
        }

        /* open session, and wait for mon, mds, and osd maps */
        err = ceph_monc_open_session(&client->monc);
        if (err < 0)
                goto out;

        while (!have_mon_and_osd_map(client)) {
                err = -EIO;
                if (timeout && time_after_eq(jiffies, started + timeout))
                        goto out;

                /* wait */
                dout("mount waiting for mon_map\n");
                err = wait_event_interruptible_timeout(client->auth_wq,
                       have_mon_and_osd_map(client) || (client->auth_err < 0),
                       timeout);
                if (err == -EINTR || err == -ERESTARTSYS)
                        goto out;
                if (client->auth_err < 0) {
                        err = client->auth_err;
                        goto out;
                }
        }

        dout("mount opening root\n");
        root = open_root_dentry(client, "", started);
        if (IS_ERR(root)) {
                err = PTR_ERR(root);
                goto out;
        }
        if (client->sb->s_root)
                dput(root);
        else
                client->sb->s_root = root;

        if (path[0] == 0) {
                dget(root);
        } else {
                dout("mount opening base mountpoint\n");
                root = open_root_dentry(client, path, started);
                if (IS_ERR(root)) {
                        err = PTR_ERR(root);
                        dput(client->sb->s_root);
                        client->sb->s_root = NULL;
                        goto out;
                }
        }

        mnt->mnt_root = root;
        mnt->mnt_sb = client->sb;

        client->mount_state = CEPH_MOUNT_MOUNTED;
        dout("mount success\n");
        err = 0;

out:
        mutex_unlock(&client->mount_mutex);
        return err;
}

static int ceph_set_super(struct super_block *s, void *data)
{
        struct ceph_client *client = data;
        int ret;

        dout("set_super %p data %p\n", s, data);

        s->s_flags = client->mount_args->sb_flags;
        s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */

        s->s_fs_info = client;
        client->sb = s;

        s->s_op = &ceph_super_ops;
        s->s_export_op = &ceph_export_ops;

        s->s_time_gran = 1000;  /* 1000 ns == 1 us */

        ret = set_anon_super(s, NULL);  /* what is that second arg for? */
        if (ret != 0)
                goto fail;

        return ret;

fail:
        s->s_fs_info = NULL;
        client->sb = NULL;
        return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
        struct ceph_client *new = data;
        struct ceph_mount_args *args = new->mount_args;
        struct ceph_client *other = ceph_sb_to_client(sb);
        int i;

        dout("ceph_compare_super %p\n", sb);
        if (args->flags & CEPH_OPT_FSID) {
                if (ceph_fsid_compare(&args->fsid, &other->fsid)) {
                        dout("fsid doesn't match\n");
                        return 0;
                }
        } else {
                /* do we share (a) monitor? */
                for (i = 0; i < new->monc.monmap->num_mon; i++)
                        if (ceph_monmap_contains(other->monc.monmap,
                                        &new->monc.monmap->mon_inst[i].addr))
                                break;
                if (i == new->monc.monmap->num_mon) {
                        dout("mon ip not part of monmap\n");
                        return 0;
                }
                dout("mon ip matches existing sb %p\n", sb);
        }
        if (args->sb_flags != other->mount_args->sb_flags) {
                dout("flags differ\n");
                return 0;
        }
        return 1;
}

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
{
        int err;

        /* set ra_pages based on rsize mount option? */
        if (client->mount_args->rsize >= PAGE_CACHE_SIZE)
                client->backing_dev_info.ra_pages =
                        (client->mount_args->rsize + PAGE_CACHE_SIZE - 1)
                        >> PAGE_SHIFT;
        err = bdi_register(&client->backing_dev_info, NULL, "ceph-%d",
                           atomic_long_inc_return(&bdi_seq));
        if (!err)
                sb->s_bdi = &client->backing_dev_info;
        return err;
}
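
/*
 * Worked example for the ra_pages calculation above (added commentary,
 * not from the original source): with 4 KB pages, rsize=524288 gives
 * (524288 + 4095) >> 12 = 128 pages, i.e. 512 KB of readahead for this
 * mount.
 */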

static int ceph_get_sb(struct file_system_type *fs_type,
                       int flags, const char *dev_name, void *data,
                       struct vfsmount *mnt)
{
        struct super_block *sb;
        struct ceph_client *client;
        int err;
        int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
        const char *path = NULL;
        struct ceph_mount_args *args;

        dout("ceph_get_sb\n");
        args = parse_mount_args(flags, data, dev_name, &path);
        if (IS_ERR(args)) {
                err = PTR_ERR(args);
                goto out_final;
        }

        /* create client (which we may/may not use) */
        client = ceph_create_client(args);
        if (IS_ERR(client)) {
                err = PTR_ERR(client);
                goto out_final;
        }

        if (client->mount_args->flags & CEPH_OPT_NOSHARE)
                compare_super = NULL;
        sb = sget(fs_type, compare_super, ceph_set_super, client);
        if (IS_ERR(sb)) {
                err = PTR_ERR(sb);
                goto out;
        }

        if (ceph_sb_to_client(sb) != client) {
                ceph_destroy_client(client);
                client = ceph_sb_to_client(sb);
                dout("get_sb got existing client %p\n", client);
        } else {
                dout("get_sb using new client %p\n", client);
                err = ceph_register_bdi(sb, client);
                if (err < 0)
                        goto out_splat;
        }

        err = ceph_mount(client, mnt, path);
        if (err < 0)
                goto out_splat;
        dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root,
             mnt->mnt_root->d_inode, ceph_vinop(mnt->mnt_root->d_inode));
        return 0;

out_splat:
        ceph_mdsc_close_sessions(&client->mdsc);
        deactivate_locked_super(sb);
        goto out_final;

out:
        ceph_destroy_client(client);
out_final:
        dout("ceph_get_sb fail %d\n", err);
        return err;
}

static void ceph_kill_sb(struct super_block *s)
{
        struct ceph_client *client = ceph_sb_to_client(s);
        dout("kill_sb %p\n", s);
        ceph_mdsc_pre_umount(&client->mdsc);
        kill_anon_super(s);    /* will call put_super after sb is r/o */
        ceph_destroy_client(client);
}

static struct file_system_type ceph_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ceph",
        .get_sb         = ceph_get_sb,
        .kill_sb        = ceph_kill_sb,
        .fs_flags       = FS_RENAME_DOES_D_MOVE,
};

#define _STRINGIFY(x) #x
#define STRINGIFY(x) _STRINGIFY(x)

static int __init init_ceph(void)
{
        int ret = 0;

        ret = ceph_debugfs_init();
        if (ret < 0)
                goto out;

        ret = ceph_msgr_init();
        if (ret < 0)
                goto out_debugfs;

        ret = init_caches();
        if (ret)
                goto out_msgr;

        ret = register_filesystem(&ceph_fs_type);
        if (ret)
                goto out_icache;

        pr_info("loaded (mon/mds/osd proto %d/%d/%d, osdmap %d/%d %d/%d)\n",
                CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL,
                CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
                CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
        return 0;

out_icache:
        destroy_caches();
out_msgr:
        ceph_msgr_exit();
out_debugfs:
        ceph_debugfs_cleanup();
out:
        return ret;
}

static void __exit exit_ceph(void)
{
        dout("exit_ceph\n");
        unregister_filesystem(&ceph_fs_type);
        destroy_caches();
        ceph_msgr_exit();
        ceph_debugfs_cleanup();
}

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");