/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *devices;

/* Module params (documentation at end) */
static unsigned int num_devices;

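/*
 * Helpers for the per-page flag bits (enum zram_pageflags) kept in each
 * zram table entry.
 */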
static int zram_test_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags &= ~BIT(flag);
}

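/* Return 1 if the page contains only zero bytes, checked one word at a time. */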
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

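/*
 * Pick a disk size if none was given (default_disksize_perc_ram percent of
 * total RAM) and warn when the requested size exceeds twice the RAM size.
 */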
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
        if (!zram->disksize) {
                pr_info(
                "disk size not provided. You can use disksize_kb module "
                "param to specify size.\nUsing default: (%u%% of RAM).\n",
                default_disksize_perc_ram
                );
                zram->disksize = default_disksize_perc_ram *
                                        (totalram_bytes / 100);
        }

        if (zram->disksize > 2 * (totalram_bytes)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %zu kB\n"
                "\tSize you selected: %zu kB\n"
                "Continuing anyway ...\n",
                totalram_bytes >> 10, zram->disksize >> 10
                );
        }

        zram->disksize &= PAGE_MASK;
}

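/*
 * Fill the user-visible statistics structure. The detailed counters are
 * only maintained when CONFIG_ZRAM_STATS is enabled.
 */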
static void zram_ioctl_get_stats(struct zram *zram,
                        struct zram_ioctl_stats *s)
{
        s->disksize = zram->disksize;

#if defined(CONFIG_ZRAM_STATS)
        {
        struct zram_stats *rs = &zram->stats;
        size_t succ_writes, mem_used;
        unsigned int good_compress_perc = 0, no_compress_perc = 0;

        mem_used = xv_get_total_size_bytes(zram->mem_pool)
                        + (rs->pages_expand << PAGE_SHIFT);
        succ_writes = zram_stat64_read(zram, &rs->num_writes) -
                        zram_stat64_read(zram, &rs->failed_writes);

        if (succ_writes && rs->pages_stored) {
                good_compress_perc = rs->good_compress * 100
                                        / rs->pages_stored;
                no_compress_perc = rs->pages_expand * 100
                                        / rs->pages_stored;
        }

        s->num_reads = zram_stat64_read(zram, &rs->num_reads);
        s->num_writes = zram_stat64_read(zram, &rs->num_writes);
        s->failed_reads = zram_stat64_read(zram, &rs->failed_reads);
        s->failed_writes = zram_stat64_read(zram, &rs->failed_writes);
        s->invalid_io = zram_stat64_read(zram, &rs->invalid_io);
        s->notify_free = zram_stat64_read(zram, &rs->notify_free);
        s->pages_zero = rs->pages_zero;

        s->good_compress_pct = good_compress_perc;
        s->pages_expand_pct = no_compress_perc;

        s->pages_stored = rs->pages_stored;
        s->pages_used = mem_used >> PAGE_SHIFT;
        s->orig_data_size = rs->pages_stored << PAGE_SHIFT;
        s->compr_data_size = rs->compr_size;
        s->mem_used_total = mem_used;
        }
#endif /* CONFIG_ZRAM_STATS */
}

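/*
 * Release whatever backs the given table entry: nothing but a flag for
 * zero-filled pages, a whole page for uncompressed pages, or an xvmalloc
 * object otherwise. Also updates the relevant statistics.
 */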
static void zram_free_page(struct zram *zram, size_t index)
{
        u32 clen;
        void *obj;

        struct page *page = zram->table[index].page;
        u32 offset = zram->table[index].offset;

        if (unlikely(!page)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                        zram_clear_flag(zram, index, ZRAM_ZERO);
                        zram_stat_dec(&zram->stats.pages_zero);
                }
                return;
        }

        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                clen = PAGE_SIZE;
                __free_page(page);
                zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_dec(&zram->stats.pages_expand);
                goto out;
        }

        obj = kmap_atomic(page, KM_USER0) + offset;
        clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
        kunmap_atomic(obj, KM_USER0);

        xv_free(zram->mem_pool, page, offset);
        if (clen <= PAGE_SIZE / 2)
                zram_stat_dec(&zram->stats.good_compress);

out:
        zram->stats.compr_size -= clen;
        zram_stat_dec(&zram->stats.pages_stored);

        zram->table[index].page = NULL;
        zram->table[index].offset = 0;
}

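/* Satisfy a read of a zero-filled page by clearing the destination page. */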
static void handle_zero_page(struct page *page)
{
        void *user_mem;

        user_mem = kmap_atomic(page, KM_USER0);
        memset(user_mem, 0, PAGE_SIZE);
        kunmap_atomic(user_mem, KM_USER0);

        flush_dcache_page(page);
}

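/* Copy a page that was stored uncompressed straight into the bio page. */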
static void handle_uncompressed_page(struct zram *zram,
                                struct page *page, u32 index)
{
        unsigned char *user_mem, *cmem;

        user_mem = kmap_atomic(page, KM_USER0);
        cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
                        zram->table[index].offset;

        memcpy(user_mem, cmem, PAGE_SIZE);
        kunmap_atomic(user_mem, KM_USER0);
        kunmap_atomic(cmem, KM_USER1);

        flush_dcache_page(page);
}

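/*
 * Read path: for each segment of the bio, hand back zeroes, a raw copy, or
 * an LZO-decompressed page, depending on how the sector was stored.
 */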
static int zram_read(struct zram *zram, struct bio *bio)
{
        int i;
        u32 index;
        struct bio_vec *bvec;

        zram_stat64_inc(zram, &zram->stats.num_reads);

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
        bio_for_each_segment(bvec, bio, i) {
                int ret;
                size_t clen;
                struct page *page;
                struct zobj_header *zheader;
                unsigned char *user_mem, *cmem;

                page = bvec->bv_page;

                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                        handle_zero_page(page);
                        continue;
                }

                /* Requested page is not present in compressed area */
                if (unlikely(!zram->table[index].page)) {
                        pr_debug("Read before write: sector=%lu, size=%u",
                                (ulong)(bio->bi_sector), bio->bi_size);
                        /* Do nothing */
                        continue;
                }

                /* Page is stored uncompressed since it's incompressible */
                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                        handle_uncompressed_page(zram, page, index);
                        continue;
                }

                user_mem = kmap_atomic(page, KM_USER0);
                clen = PAGE_SIZE;

                cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
                                zram->table[index].offset;

                ret = lzo1x_decompress_safe(
                        cmem + sizeof(*zheader),
                        xv_get_object_size(cmem) - sizeof(*zheader),
                        user_mem, &clen);

                kunmap_atomic(user_mem, KM_USER0);
                kunmap_atomic(cmem, KM_USER1);

                /* Should NEVER happen. Return bio error if it does. */
                if (unlikely(ret != LZO_E_OK)) {
                        pr_err("Decompression failed! err=%d, page=%u\n",
                                ret, index);
                        zram_stat64_inc(zram, &zram->stats.failed_reads);
                        goto out;
                }

                flush_dcache_page(page);
                index++;
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;

out:
        bio_io_error(bio);
        return 0;
}

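/*
 * Write path: zero-filled pages are recorded with only a flag; other pages
 * are LZO-compressed into the per-device buffer and copied into an xvmalloc
 * object, or stored uncompressed when they do not compress below
 * max_zpage_size.
 */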
static int zram_write(struct zram *zram, struct bio *bio)
{
        int i;
        u32 index;
        struct bio_vec *bvec;

        zram_stat64_inc(zram, &zram->stats.num_writes);

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                int ret;
                u32 offset;
                size_t clen;
                struct zobj_header *zheader;
                struct page *page, *page_store;
                unsigned char *user_mem, *cmem, *src;

                page = bvec->bv_page;
                src = zram->compress_buffer;

                /*
                 * System overwrites unused sectors. Free memory associated
                 * with this sector now.
                 */
                if (zram->table[index].page ||
                                zram_test_flag(zram, index, ZRAM_ZERO))
                        zram_free_page(zram, index);

                mutex_lock(&zram->lock);

                user_mem = kmap_atomic(page, KM_USER0);
                if (page_zero_filled(user_mem)) {
                        kunmap_atomic(user_mem, KM_USER0);
                        mutex_unlock(&zram->lock);
                        zram_stat_inc(&zram->stats.pages_zero);
                        zram_set_flag(zram, index, ZRAM_ZERO);
                        continue;
                }

                ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
                                        zram->compress_workmem);

                kunmap_atomic(user_mem, KM_USER0);

                if (unlikely(ret != LZO_E_OK)) {
                        mutex_unlock(&zram->lock);
                        pr_err("Compression failed! err=%d\n", ret);
                        zram_stat64_inc(zram, &zram->stats.failed_writes);
                        goto out;
                }

                /*
                 * Page is incompressible. Store it as-is (uncompressed)
                 * since we do not want to return too many disk write
                 * errors, which have the side effect of hanging the system.
                 */
                if (unlikely(clen > max_zpage_size)) {
                        clen = PAGE_SIZE;
                        page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
                        if (unlikely(!page_store)) {
                                mutex_unlock(&zram->lock);
                                pr_info("Error allocating memory for "
                                        "incompressible page: %u\n", index);
                                zram_stat64_inc(zram,
                                        &zram->stats.failed_writes);
                                goto out;
                        }

                        offset = 0;
                        zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
                        zram_stat_inc(&zram->stats.pages_expand);
                        zram->table[index].page = page_store;
                        src = kmap_atomic(page, KM_USER0);
                        goto memstore;
                }

                if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
                                &zram->table[index].page, &offset,
                                GFP_NOIO | __GFP_HIGHMEM)) {
                        mutex_unlock(&zram->lock);
                        pr_info("Error allocating memory for compressed "
                                "page: %u, size=%zu\n", index, clen);
                        zram_stat64_inc(zram, &zram->stats.failed_writes);
                        goto out;
                }

memstore:
                zram->table[index].offset = offset;

                cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
                                zram->table[index].offset;

#if 0
                /* Back-reference needed for memory defragmentation */
                if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
                        zheader = (struct zobj_header *)cmem;
                        zheader->table_idx = index;
                        cmem += sizeof(*zheader);
                }
#endif

                memcpy(cmem, src, clen);

                kunmap_atomic(cmem, KM_USER1);
                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
                        kunmap_atomic(src, KM_USER0);

                /* Update stats */
                zram->stats.compr_size += clen;
                zram_stat_inc(&zram->stats.pages_stored);
                if (clen <= PAGE_SIZE / 2)
                        zram_stat_inc(&zram->stats.good_compress);

                mutex_unlock(&zram->lock);
                index++;
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;

out:
        bio_io_error(bio);
        return 0;
}

/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        if (unlikely(
                (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
                (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
                (bio->bi_size & (PAGE_SIZE - 1)))) {

                return 0;
        }

        /* I/O request is valid */
        return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
        int ret = 0;
        struct zram *zram = queue->queuedata;

        if (unlikely(!zram->init_done)) {
                bio_io_error(bio);
                return 0;
        }

        if (!valid_io_request(zram, bio)) {
                zram_stat64_inc(zram, &zram->stats.invalid_io);
                bio_io_error(bio);
                return 0;
        }

        switch (bio_data_dir(bio)) {
        case READ:
                ret = zram_read(zram, bio);
                break;

        case WRITE:
                ret = zram_write(zram, bio);
                break;
        }

        return ret;
}

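/*
 * Tear down a device's in-memory state: stop accepting I/O, free every
 * stored page, the table, the buffers and the xvmalloc pool, and clear
 * the statistics.
 */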
static void reset_device(struct zram *zram)
{
        size_t index;

        /* Do not accept any new I/O request */
        zram->init_done = 0;

        /* Free various per-device buffers */
        kfree(zram->compress_workmem);
        free_pages((unsigned long)zram->compress_buffer, 1);

        zram->compress_workmem = NULL;
        zram->compress_buffer = NULL;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                struct page *page;
                u16 offset;

                page = zram->table[index].page;
                offset = zram->table[index].offset;

                if (!page)
                        continue;

                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
                        __free_page(page);
                else
                        xv_free(zram->mem_pool, page, offset);
        }

        vfree(zram->table);
        zram->table = NULL;

        xv_destroy_pool(zram->mem_pool);
        zram->mem_pool = NULL;

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
}

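/*
 * ZRAMIO_INIT handler: allocate the compression workspace, the page table
 * and the xvmalloc pool, and publish the disk capacity.
 */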
static int zram_ioctl_init_device(struct zram *zram)
{
        int ret;
        size_t num_pages;

        if (zram->init_done) {
                pr_info("Device already initialized!\n");
                return -EBUSY;
        }

        zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

        zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!zram->compress_workmem) {
                pr_err("Error allocating compressor working memory!\n");
                ret = -ENOMEM;
                goto fail;
        }

        zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
        if (!zram->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                ret = -ENOMEM;
                goto fail;
        }

        num_pages = zram->disksize >> PAGE_SHIFT;
        zram->table = vmalloc(num_pages * sizeof(*zram->table));
        if (!zram->table) {
                pr_err("Error allocating zram address table\n");
                /* To prevent accessing table entries during cleanup */
                zram->disksize = 0;
                ret = -ENOMEM;
                goto fail;
        }
        memset(zram->table, 0, num_pages * sizeof(*zram->table));

        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->mem_pool = xv_create_pool();
        if (!zram->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
                goto fail;
        }

        zram->init_done = 1;

        pr_debug("Initialization done!\n");
        return 0;

fail:
        reset_device(zram);

        pr_err("Initialization failed: err=%d\n", ret);
        return ret;
}

static int zram_ioctl_reset_device(struct zram *zram)
{
        if (zram->init_done)
                reset_device(zram);

        return 0;
}

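/*
 * Device control interface: set the disk size, fetch statistics, and
 * initialize or reset the device. Illustrative userspace usage (a sketch;
 * assumes the ZRAMIO_* ioctl definitions are available to the program):
 *
 *	int fd = open("/dev/zram0", O_RDWR);
 *	size_t disksize_kb = 1048576;	 // request a 1 GiB disk
 *	ioctl(fd, ZRAMIO_SET_DISKSIZE_KB, &disksize_kb);
 *	ioctl(fd, ZRAMIO_INIT, 0);
 */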
static int zram_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        int ret = 0;
        size_t disksize_kb;

        struct zram *zram = bdev->bd_disk->private_data;

        switch (cmd) {
        case ZRAMIO_SET_DISKSIZE_KB:
                if (zram->init_done) {
                        ret = -EBUSY;
                        goto out;
                }
                if (copy_from_user(&disksize_kb, (void *)arg,
                                                _IOC_SIZE(cmd))) {
                        ret = -EFAULT;
                        goto out;
                }
                zram->disksize = disksize_kb << 10;
                pr_info("Disk size set to %zu kB\n", disksize_kb);
                break;

        case ZRAMIO_GET_STATS:
        {
                struct zram_ioctl_stats *stats;
                if (!zram->init_done) {
                        ret = -ENOTTY;
                        goto out;
                }
                stats = kzalloc(sizeof(*stats), GFP_KERNEL);
                if (!stats) {
                        ret = -ENOMEM;
                        goto out;
                }
                zram_ioctl_get_stats(zram, stats);
                if (copy_to_user((void *)arg, stats, sizeof(*stats))) {
                        kfree(stats);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(stats);
                break;
        }
        case ZRAMIO_INIT:
                ret = zram_ioctl_init_device(zram);
                break;

        case ZRAMIO_RESET:
                /* Do not reset an active device! */
                if (bdev->bd_holders) {
                        ret = -EBUSY;
                        goto out;
                }

                /* Make sure all pending I/O is finished */
                if (bdev)
                        fsync_bdev(bdev);

                ret = zram_ioctl_reset_device(zram);
                break;

        default:
                pr_info("Invalid ioctl %u\n", cmd);
                ret = -ENOTTY;
        }

out:
        return ret;
}

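/*
 * Called by the swap code when a swap slot on this device is freed, so the
 * corresponding compressed page can be released immediately.
 */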
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;
        zram_free_page(zram, index);
        zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .ioctl = zram_ioctl,
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

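/*
 * Allocate and register the request queue and gendisk for one device and
 * force page-sized, page-aligned I/O on it.
 */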
static int create_device(struct zram *zram, int device_id)
{
        int ret = 0;

        mutex_init(&zram->lock);
        spin_lock_init(&zram->stat64_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                blk_cleanup_queue(zram->queue);
                pr_warning("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using ZRAMIO_SET_DISKSIZE_KB ioctl */
        set_capacity(zram->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        zram->init_done = 0;

out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        if (zram->disk) {
                del_gendisk(zram->disk);
                put_disk(zram->disk);
        }

        if (zram->queue)
                blk_cleanup_queue(zram->queue);
}

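/*
 * Module init: register the block major, then allocate and create
 * num_devices devices (default 1), unwinding on failure.
 */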
static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warning("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warning("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        if (!num_devices) {
                pr_info("num_devices not specified. Using default: 1\n");
                num_devices = 1;
        }

        /* Allocate the device array and initialize each one */
        pr_info("Creating %u devices ...\n", num_devices);
        devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&devices[--dev_id]);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &devices[i];

                destroy_device(zram);
                if (zram->init_done)
                        reset_device(zram);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(devices);
        pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
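/*
 * Example (illustrative): loading the module with
 *	modprobe zram num_devices=4
 * registers disks zram0 through zram3.
 */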

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");