/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 * Copyright (C) 2006, 2007 University of Szeged, Hungary
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 *          Zoltan Sogor
 */

/*
 * This file implements the UBIFS I/O subsystem which provides various
 * I/O-related helper functions (reading/writing/checking/validating nodes)
 * and implements write-buffering support. Write-buffers help to save space
 * which otherwise would have been wasted for padding to the nearest minimal
 * I/O unit boundary. Instead, data first goes to the write-buffer and is
 * flushed when the buffer is full or when it has not been used for some time
 * (by timer). This is similar to the mechanism used by JFFS2.
 *
 * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by
 * mutexes defined inside these objects. Since sometimes upper-level code
 * has to lock the write-buffer (e.g. journal space reservation code), many
 * functions related to write-buffers have a "nolock" suffix which means that
 * the caller has to lock the write-buffer before calling such a function.
 *
 * UBIFS stores nodes at 64 bit-aligned addresses. If the node length is not
 * aligned, UBIFS starts the next node from the aligned address, and the padded
 * bytes may contain any rubbish. In other words, UBIFS does not put padding
 * bytes in those small gaps. Common headers of nodes store real node lengths,
 * not aligned lengths. Indexing nodes also store real lengths in branches.
 *
 * UBIFS uses padding when it pads to the next min. I/O unit. In this case it
 * uses padding nodes, or padding bytes if a padding node does not fit.
 *
 * All UBIFS nodes are protected by CRC checksums and UBIFS checks all nodes
 * every time they are read from the flash media.
 */

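/*
 * Example: a minimal sketch of the "nolock" calling convention described
 * above. A hypothetical caller (not taken from this file) is expected to take
 * the write-buffer mutex itself before using any of the "nolock" helpers:
 *
 *        mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
 *        err = ubifs_wbuf_sync_nolock(wbuf);
 *        mutex_unlock(&wbuf->io_mutex);
 */
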
#include <linux/crc32.h>
#include "ubifs.h"

/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 *
 * This function checks node magic number and CRC checksum. This function also
 * validates node length to prevent UBIFS from becoming crazy when an attacker
 * feeds it a file-system image with incorrect nodes. For example, a too-large
 * node length in the common header could cause UBIFS to read memory outside of
 * the allocated buffer when checking the CRC checksum.
 *
 * This function returns zero in case of success and %-EUCLEAN in case of bad
 * CRC or magic.
 */
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
                     int offs, int quiet)
{
        int err = -EINVAL, type, node_len;
        uint32_t crc, node_crc, magic;
        const struct ubifs_ch *ch = buf;

        ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(!(offs & 7) && offs < c->leb_size);

        magic = le32_to_cpu(ch->magic);
        if (magic != UBIFS_NODE_MAGIC) {
                if (!quiet)
                        ubifs_err("bad magic %#08x, expected %#08x",
                                  magic, UBIFS_NODE_MAGIC);
                err = -EUCLEAN;
                goto out;
        }

        type = ch->node_type;
        if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
                if (!quiet)
                        ubifs_err("bad node type %d", type);
                goto out;
        }

        node_len = le32_to_cpu(ch->len);
        if (node_len + offs > c->leb_size)
                goto out_len;

        if (c->ranges[type].max_len == 0) {
                if (node_len != c->ranges[type].len)
                        goto out_len;
        } else if (node_len < c->ranges[type].min_len ||
                   node_len > c->ranges[type].max_len)
                goto out_len;

        crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
        node_crc = le32_to_cpu(ch->crc);
        if (crc != node_crc) {
                if (!quiet)
                        ubifs_err("bad CRC: calculated %#08x, read %#08x",
                                  crc, node_crc);
                err = -EUCLEAN;
                goto out;
        }

        return 0;

out_len:
        if (!quiet)
                ubifs_err("bad node length %d", node_len);
out:
        if (!quiet) {
                ubifs_err("bad node at LEB %d:%d", lnum, offs);
                dbg_dump_node(c, buf);
                dbg_dump_stack();
        }
        return err;
}
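
/*
 * Example: a minimal sketch (hypothetical caller, not from this file) of
 * validating a node that has just been read from the flash media; @quiet is
 * set here as a scanner would, so that expected garbage does not flood the
 * log. Note that %-EBADMSG from ubi_read() is treated as non-fatal, as in the
 * read helpers below:
 *
 *        err = ubi_read(c->ubi, lnum, buf, offs, len);
 *        if (!err || err == -EBADMSG)
 *                err = ubifs_check_node(c, buf, lnum, offs, 1);
 */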

/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * The flash media obliges us to write only in chunks of %c->min_io_size, so
 * when we have to write less data we add a padding node to the write-buffer
 * and pad it to the next minimal I/O unit's boundary. Padding nodes help when
 * the media is being scanned. If the amount of wasted space is not enough to
 * fit a padding node, which takes %UBIFS_PAD_NODE_SZ bytes, we write the
 * padding byte pattern (%UBIFS_PADDING_BYTE) instead.
 *
 * Padding nodes are also used to fill gaps when the "commit-in-gaps" method is
 * used.
 */
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
        uint32_t crc;

        ubifs_assert(pad >= 0 && !(pad & 7));

        if (pad >= UBIFS_PAD_NODE_SZ) {
                struct ubifs_ch *ch = buf;
                struct ubifs_pad_node *pad_node = buf;

                ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
                ch->node_type = UBIFS_PAD_NODE;
                ch->group_type = UBIFS_NO_NODE_GROUP;
                ch->padding[0] = ch->padding[1] = 0;
                ch->sqnum = 0;
                ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
                pad -= UBIFS_PAD_NODE_SZ;
                pad_node->pad_len = cpu_to_le32(pad);
                crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
                ch->crc = cpu_to_le32(crc);
                memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
        } else if (pad > 0)
                /* Too little space, padding node won't fit */
                memset(buf, UBIFS_PADDING_BYTE, pad);
}
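
/*
 * Example: a minimal sketch of how a caller pads a partially filled buffer up
 * to the next min. I/O unit boundary (this mirrors what ubifs_prepare_node()
 * does when its @pad argument is set; the variables are hypothetical):
 *
 *        len = ALIGN(len, 8);
 *        pad = ALIGN(len, c->min_io_size) - len;
 *        ubifs_pad(c, buf + len, pad);
 */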

/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */
static unsigned long long next_sqnum(struct ubifs_info *c)
{
        unsigned long long sqnum;

        spin_lock(&c->cnt_lock);
        sqnum = ++c->max_sqnum;
        spin_unlock(&c->cnt_lock);

        if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
                if (sqnum >= SQNUM_WATERMARK) {
                        ubifs_err("sequence number overflow %llu, end of life",
                                  sqnum);
                        ubifs_ro_mode(c, -EINVAL);
                }
                ubifs_warn("running out of sequence numbers, end of life soon");
        }

        return sqnum;
}

/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC, fills the common header, and adds proper padding up to
 * the next minimum I/O unit if @pad is not zero.
 */
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
        uint32_t crc;
        struct ubifs_ch *ch = node;
        unsigned long long sqnum = next_sqnum(c);

        ubifs_assert(len >= UBIFS_CH_SZ);

        ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
        ch->len = cpu_to_le32(len);
        ch->group_type = UBIFS_NO_NODE_GROUP;
        ch->sqnum = cpu_to_le64(sqnum);
        ch->padding[0] = ch->padding[1] = 0;
        crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
        ch->crc = cpu_to_le32(crc);

        if (pad) {
                len = ALIGN(len, 8);
                pad = ALIGN(len, c->min_io_size) - len;
                ubifs_pad(c, node + len, pad);
        }
}
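
/*
 * Example: a minimal sketch (hypothetical caller) of the typical use of this
 * helper when the node goes through a write-buffer - padding is then left to
 * the write-buffer code, so @pad is zero:
 *
 *        ubifs_prepare_node(c, node, len, 0);
 *        err = ubifs_wbuf_write_nolock(wbuf, node, len);
 *
 * whereas ubifs_write_node() below passes @pad as 1 and writes the padded
 * buffer directly with ubi_leb_write().
 */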

/**
 * ubifs_prep_grp_node - prepare node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC and fills the common header.
 */
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
        uint32_t crc;
        struct ubifs_ch *ch = node;
        unsigned long long sqnum = next_sqnum(c);

        ubifs_assert(len >= UBIFS_CH_SZ);

        ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
        ch->len = cpu_to_le32(len);
        if (last)
                ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
        else
                ch->group_type = UBIFS_IN_NODE_GROUP;
        ch->sqnum = cpu_to_le64(sqnum);
        ch->padding[0] = ch->padding[1] = 0;
        crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
        ch->crc = cpu_to_le32(crc);
}
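
/*
 * Example: a minimal sketch of preparing a group of nodes (node1..node3 are
 * hypothetical buffers); only the final node is flagged as the last one of
 * the group, which is what a scanner later uses to find the group boundary:
 *
 *        ubifs_prep_grp_node(c, node1, len1, 0);
 *        ubifs_prep_grp_node(c, node2, len2, 0);
 *        ubifs_prep_grp_node(c, node3, len3, 1);
 */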

/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @data: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires.
 */
static void wbuf_timer_callback_nolock(unsigned long data)
{
        struct ubifs_wbuf *wbuf = (struct ubifs_wbuf *)data;

        wbuf->need_sync = 1;
        wbuf->c->need_wbuf_sync = 1;
        ubifs_wake_up_bgt(wbuf->c);
}

/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @wbuf: write-buffer descriptor
 */
static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
        ubifs_assert(!timer_pending(&wbuf->timer));

        if (!wbuf->timeout)
                return;

        wbuf->timer.expires = jiffies + wbuf->timeout;
        add_timer(&wbuf->timer);
}

/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
        /*
         * If the syncer is waiting for the lock (from the background thread's
         * context) and another task is changing the write-buffer, then the
         * syncing should be canceled.
         */
        wbuf->need_sync = 0;
        del_timer(&wbuf->timer);
}

/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 */
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
        struct ubifs_info *c = wbuf->c;
        int err, dirt;

        cancel_wbuf_timer_nolock(wbuf);
        if (!wbuf->used || wbuf->lnum == -1)
                /* Write-buffer is empty or not seeked */
                return 0;

        dbg_io("LEB %d:%d, %d bytes",
               wbuf->lnum, wbuf->offs, wbuf->used);
        ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
        ubifs_assert(!(wbuf->avail & 7));
        ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);

        if (c->ro_media)
                return -EROFS;

        ubifs_pad(c, wbuf->buf + wbuf->used, wbuf->avail);
        err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
                            c->min_io_size, wbuf->dtype);
        if (err) {
                ubifs_err("cannot write %d bytes to LEB %d:%d",
                          c->min_io_size, wbuf->lnum, wbuf->offs);
                dbg_dump_stack();
                return err;
        }

        dirt = wbuf->avail;

        spin_lock(&wbuf->lock);
        wbuf->offs += c->min_io_size;
        wbuf->avail = c->min_io_size;
        wbuf->used = 0;
        wbuf->next_ino = 0;
        spin_unlock(&wbuf->lock);

        if (wbuf->sync_callback)
                err = wbuf->sync_callback(c, wbuf->lnum,
                                          c->leb_size - wbuf->offs, dirt);
        return err;
}
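
/*
 * Example: a worked figure for the synchronization above, assuming a
 * hypothetical 2048-byte min. I/O unit. If the write-buffer holds 1000 used
 * bytes, ubifs_pad() fills the remaining 1048 bytes, exactly 2048 bytes are
 * written out with ubi_leb_write(), and those 1048 padding bytes are reported
 * to the sync callback as dirty space ("dirt").
 */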

/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 * @dtype: data type
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer is synchronized if it is not empty. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
                           int dtype)
{
        const struct ubifs_info *c = wbuf->c;

        dbg_io("LEB %d:%d", lnum, offs);
        ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
        ubifs_assert(offs >= 0 && offs <= c->leb_size);
        ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
        ubifs_assert(lnum != wbuf->lnum);

        if (wbuf->used > 0) {
                int err = ubifs_wbuf_sync_nolock(wbuf);

                if (err)
                        return err;
        }

        spin_lock(&wbuf->lock);
        wbuf->lnum = lnum;
        wbuf->offs = offs;
        wbuf->avail = c->min_io_size;
        wbuf->used = 0;
        spin_unlock(&wbuf->lock);
        wbuf->dtype = dtype;

        return 0;
}
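
/*
 * Example: a minimal sketch (hypothetical caller) of pointing a journal
 * head's write-buffer at the beginning of a freshly picked LEB:
 *
 *        err = ubifs_wbuf_seek_nolock(wbuf, lnum, 0, UBI_UNKNOWN);
 *        if (err)
 *                return err;
 */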

/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by the background thread to synchronize
 * write-buffers. Returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
        int err, i;

        if (!c->need_wbuf_sync)
                return 0;
        c->need_wbuf_sync = 0;

        if (c->ro_media) {
                err = -EROFS;
                goto out_timers;
        }

        dbg_io("synchronize");
        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                cond_resched();

                /*
                 * If the mutex is locked then wbuf is being changed, so
                 * synchronization is not necessary.
                 */
                if (mutex_is_locked(&wbuf->io_mutex))
                        continue;

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                if (!wbuf->need_sync) {
                        mutex_unlock(&wbuf->io_mutex);
                        continue;
                }

                err = ubifs_wbuf_sync_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);
                if (err) {
                        ubifs_err("cannot sync write-buffer, error %d", err);
                        ubifs_ro_mode(c, err);
                        goto out_timers;
                }
        }

        return 0;

out_timers:
        /* Cancel all timers to prevent repeated errors */
        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                cancel_wbuf_timer_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);
        }
        return err;
}

/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node won't reach the flash media immediately if it
 * does not take up a whole minimal I/O unit. Instead, the node will sit in RAM
 * until the write-buffer is synchronized (e.g., by timer).
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If the node cannot be written because there is no more
 * space in this logical eraseblock, %-ENOSPC is returned.
 */
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
        struct ubifs_info *c = wbuf->c;
        int err, written, n, aligned_len = ALIGN(len, 8), offs;

        dbg_io("%d bytes (%s) to wbuf at LEB %d:%d", len,
               dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->lnum,
               wbuf->offs + wbuf->used);
        ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
        ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
        ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
        ubifs_assert(wbuf->avail > 0 && wbuf->avail <= c->min_io_size);
        ubifs_assert(mutex_is_locked(&wbuf->io_mutex));

        if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
                err = -ENOSPC;
                goto out;
        }

        cancel_wbuf_timer_nolock(wbuf);

        if (c->ro_media)
                return -EROFS;

        if (aligned_len <= wbuf->avail) {
                /*
                 * The node is not very large and fits entirely within the
                 * write-buffer.
                 */
                memcpy(wbuf->buf + wbuf->used, buf, len);

                if (aligned_len == wbuf->avail) {
                        dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum,
                               wbuf->offs);
                        err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
                                            wbuf->offs, c->min_io_size,
                                            wbuf->dtype);
                        if (err)
                                goto out;

                        spin_lock(&wbuf->lock);
                        wbuf->offs += c->min_io_size;
                        wbuf->avail = c->min_io_size;
                        wbuf->used = 0;
                        wbuf->next_ino = 0;
                        spin_unlock(&wbuf->lock);
                } else {
                        spin_lock(&wbuf->lock);
                        wbuf->avail -= aligned_len;
                        wbuf->used += aligned_len;
                        spin_unlock(&wbuf->lock);
                }

                goto exit;
        }

        /*
         * The node is large enough that it does not fit entirely within the
         * current minimal I/O unit. We have to fill and flush the write-buffer
         * and switch to the next min. I/O unit.
         */
        dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, wbuf->offs);
        memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
        err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
                            c->min_io_size, wbuf->dtype);
        if (err)
                goto out;

        offs = wbuf->offs + c->min_io_size;
        len -= wbuf->avail;
        aligned_len -= wbuf->avail;
        written = wbuf->avail;

        /*
         * The remaining data may take up one or more whole min. I/O units, so
         * write whole multiples of the min. I/O unit size directly to the
         * flash media. The node length is aligned to an 8-byte boundary
         * because the write-buffer is flushed anyway when less than 8 bytes
         * remain.
         */
        n = aligned_len >> c->min_io_shift;
        if (n) {
                n <<= c->min_io_shift;
                dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, offs);
                err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written, offs, n,
                                    wbuf->dtype);
                if (err)
                        goto out;
                offs += n;
                aligned_len -= n;
                len -= n;
                written += n;
        }

        spin_lock(&wbuf->lock);
        if (aligned_len)
                /*
                 * And now we have what's left, which does not take up a whole
                 * min. I/O unit, so write it to the write-buffer and we are
                 * done.
                 */
                memcpy(wbuf->buf, buf + written, len);

        wbuf->offs = offs;
        wbuf->used = aligned_len;
        wbuf->avail = c->min_io_size - aligned_len;
        wbuf->next_ino = 0;
        spin_unlock(&wbuf->lock);

exit:
        if (wbuf->sync_callback) {
                int free = c->leb_size - wbuf->offs - wbuf->used;

                err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
                if (err)
                        goto out;
        }

        if (wbuf->used)
                new_wbuf_timer_nolock(wbuf);

        return 0;

out:
        ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
                  len, wbuf->lnum, wbuf->offs, err);
        dbg_dump_node(c, buf);
        dbg_dump_stack();
        dbg_dump_leb(c, wbuf->lnum);
        return err;
}
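
/*
 * Example: a simplified sketch of how a hypothetical caller might react to
 * the %-ENOSPC return value above - move the write-buffer to another LEB
 * (new_lnum is a hypothetical replacement eraseblock) and try again; real
 * callers do considerably more space bookkeeping than this:
 *
 *        err = ubifs_wbuf_write_nolock(wbuf, node, len);
 *        if (err == -ENOSPC) {
 *                err = ubifs_wbuf_seek_nolock(wbuf, new_lnum, 0, wbuf->dtype);
 *                if (!err)
 *                        err = ubifs_wbuf_write_nolock(wbuf, node, len);
 *        }
 */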

/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @dtype: node life-time hint (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
 *
 * This function automatically fills node magic number, assigns sequence
 * number, and calculates node CRC checksum. The length of the @buf buffer has
 * to be aligned to the minimal I/O unit size. This function automatically
 * appends a padding node and padding bytes if needed. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
                     int offs, int dtype)
{
        int err, buf_len = ALIGN(len, c->min_io_size);

        dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
               lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
               buf_len);
        ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);

        if (c->ro_media)
                return -EROFS;

        ubifs_prepare_node(c, buf, len, 1);
        err = ubi_leb_write(c->ubi, lnum, buf, offs, buf_len, dtype);
        if (err) {
                ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
                          buf_len, lnum, offs, err);
                dbg_dump_node(c, buf);
                dbg_dump_stack();
        }

        return err;
}
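
/*
 * Example: a minimal sketch (hypothetical caller) showing why the buffer
 * passed to ubifs_write_node() has to be allocated for the aligned length -
 * the padding appended by ubifs_prepare_node() lands in the same buffer:
 *
 *        buf = kmalloc(ALIGN(len, c->min_io_size), GFP_KERNEL);
 *        if (!buf)
 *                return -ENOMEM;
 *        (fill in the node payload here)
 *        err = ubifs_write_node(c, buf, len, lnum, offs, UBI_UNKNOWN);
 */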

/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes data from the buffer, otherwise it reads the flash media.
 * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative
 * error code in case of failure.
 */
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
                         int lnum, int offs)
{
        const struct ubifs_info *c = wbuf->c;
        int err, rlen, overlap;
        struct ubifs_ch *ch = buf;

        dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
        ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(!(offs & 7) && offs < c->leb_size);
        ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

        spin_lock(&wbuf->lock);
        overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
        if (!overlap) {
                /* We may safely unlock the write-buffer and read the data */
                spin_unlock(&wbuf->lock);
                return ubifs_read_node(c, buf, type, len, lnum, offs);
        }

        /* Don't read under wbuf */
        rlen = wbuf->offs - offs;
        if (rlen < 0)
                rlen = 0;

        /* Copy the rest from the write-buffer */
        memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
        spin_unlock(&wbuf->lock);

        if (rlen > 0) {
                /* Read everything that goes before write-buffer */
                err = ubi_read(c->ubi, lnum, buf, offs, rlen);
                if (err && err != -EBADMSG) {
                        ubifs_err("failed to read node %d from LEB %d:%d, "
                                  "error %d", type, lnum, offs, err);
                        dbg_dump_stack();
                        return err;
                }
        }

        if (type != ch->node_type) {
                ubifs_err("bad node type (%d but expected %d)",
                          ch->node_type, type);
                goto out;
        }

        err = ubifs_check_node(c, buf, lnum, offs, 0);
        if (err) {
                ubifs_err("expected node type %d", type);
                return err;
        }

        rlen = le32_to_cpu(ch->len);
        if (rlen != len) {
                ubifs_err("bad node length %d, expected %d", rlen, len);
                goto out;
        }

        return 0;

out:
        ubifs_err("bad node at LEB %d:%d", lnum, offs);
        dbg_dump_node(c, buf);
        dbg_dump_stack();
        return -EINVAL;
}

/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched
 * and a negative error code in case of failure.
 */
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
                    int lnum, int offs)
{
        int err, l;
        struct ubifs_ch *ch = buf;

        dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
        ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
        ubifs_assert(!(offs & 7) && offs < c->leb_size);
        ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

        err = ubi_read(c->ubi, lnum, buf, offs, len);
        if (err && err != -EBADMSG) {
                ubifs_err("cannot read node %d from LEB %d:%d, error %d",
                          type, lnum, offs, err);
                return err;
        }

        if (type != ch->node_type) {
                ubifs_err("bad node type (%d but expected %d)",
                          ch->node_type, type);
                goto out;
        }

        err = ubifs_check_node(c, buf, lnum, offs, 0);
        if (err) {
                ubifs_err("expected node type %d", type);
                return err;
        }

        l = le32_to_cpu(ch->len);
        if (l != len) {
                ubifs_err("bad node length %d, expected %d", l, len);
                goto out;
        }

        return 0;

out:
        ubifs_err("bad node at LEB %d:%d", lnum, offs);
        dbg_dump_node(c, buf);
        dbg_dump_stack();
        return -EINVAL;
}
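
/*
 * Example: a minimal sketch (hypothetical caller) of reading a node whose
 * type, length and position are already known, e.g. taken from an index
 * branch:
 *
 *        err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
 *        if (err)
 *                return err;
 */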

/**
 * ubifs_wbuf_init - initialize write-buffer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer to initialize
 *
 * This function initializes the write-buffer. Returns zero in case of success
 * and %-ENOMEM in case of failure.
 */
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
        size_t size;

        wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL);
        if (!wbuf->buf)
                return -ENOMEM;

        size = (c->min_io_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
        wbuf->inodes = kmalloc(size, GFP_KERNEL);
        if (!wbuf->inodes) {
                kfree(wbuf->buf);
                wbuf->buf = NULL;
                return -ENOMEM;
        }

        wbuf->used = 0;
        wbuf->lnum = wbuf->offs = -1;
        wbuf->avail = c->min_io_size;
        wbuf->dtype = UBI_UNKNOWN;
        wbuf->sync_callback = NULL;
        mutex_init(&wbuf->io_mutex);
        spin_lock_init(&wbuf->lock);

        wbuf->c = c;
        init_timer(&wbuf->timer);
        wbuf->timer.function = wbuf_timer_callback_nolock;
        wbuf->timer.data = (unsigned long)wbuf;
        wbuf->timeout = DEFAULT_WBUF_TIMEOUT;
        wbuf->next_ino = 0;

        return 0;
}
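
/*
 * Example: a minimal sketch of how a hypothetical mount-time caller might set
 * up one write-buffer per journal head; @jhead is the struct ubifs_wbuf field
 * used elsewhere in this file as the lock subclass:
 *
 *        err = ubifs_wbuf_init(c, &c->jheads[i].wbuf);
 *        if (err)
 *                return err;
 *        c->jheads[i].wbuf.jhead = i;
 */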

/**
 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
 * @wbuf: the write-buffer to add to
 * @inum: the inode number
 *
 * This function adds an inode number to the inode array of the write-buffer.
 */
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
        if (!wbuf->buf)
                /* NOR flash or something similar */
                return;

        spin_lock(&wbuf->lock);
        if (wbuf->used)
                wbuf->inodes[wbuf->next_ino++] = inum;
        spin_unlock(&wbuf->lock);
}

/**
 * wbuf_has_ino - returns if the wbuf contains data from the inode.
 * @wbuf: the write-buffer
 * @inum: the inode number
 *
 * This function returns %1 if the write-buffer contains some data from the
 * given inode, otherwise it returns %0.
 */
static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
{
        int i, ret = 0;

        spin_lock(&wbuf->lock);
        for (i = 0; i < wbuf->next_ino; i++)
                if (inum == wbuf->inodes[i]) {
                        ret = 1;
                        break;
                }
        spin_unlock(&wbuf->lock);

        return ret;
}

/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes write-buffers which contain nodes belonging to
 * @inode. Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
        int i, err = 0;

        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                if (i == GCHD)
                        /*
                         * GC head is special, do not look at it. Even if the
                         * head contains something related to this inode, it is
                         * a _copy_ of corresponding on-flash node which sits
                         * somewhere else.
                         */
                        continue;

                if (!wbuf_has_ino(wbuf, inode->i_ino))
                        continue;

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                if (wbuf_has_ino(wbuf, inode->i_ino))
                        err = ubifs_wbuf_sync_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);

                if (err) {
                        ubifs_ro_mode(c, err);
                        return err;
                }
        }
        return 0;
}
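
/*
 * Example: a minimal sketch (hypothetical fsync-style caller) of making sure
 * everything buffered for an inode has reached the flash media before the
 * inode itself is written out:
 *
 *        err = ubifs_sync_wbufs_by_inode(c, inode);
 *        if (err)
 *                return err;
 */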