]> bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 18 Aug 2010 20:27:41 +0000 (13:27 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 18 Aug 2010 20:27:41 +0000 (13:27 -0700)
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Fix build error: conflicting types for ‘sys_execve’

79 files changed:
Documentation/powerpc/booting-without-of.txt
arch/m68k/include/asm/ide.h
arch/m68knommu/kernel/process.c
arch/microblaze/kernel/prom_parse.c
arch/microblaze/pci/pci-common.c
arch/microblaze/pci/xilinx_pci.c
drivers/ata/sata_dwc_460ex.c
drivers/block/xsysace.c
drivers/char/pty.c
drivers/char/tty_io.c
drivers/char/xilinx_hwicap/xilinx_hwicap.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/mmc/core/host.c
drivers/mtd/maps/physmap_of.c
drivers/serial/of_serial.c
drivers/spi/coldfire_qspi.c
drivers/staging/pohmelfs/path_entry.c
fs/buffer.c
fs/cramfs/inode.c
fs/dcache.c
fs/exec.c
fs/fat/misc.c
fs/file_table.c
fs/fs_struct.c
fs/generic_acl.c
fs/hostfs/hostfs_kern.c
fs/internal.h
fs/jbd/checkpoint.c
fs/jbd/commit.c
fs/jbd/journal.c
fs/jbd/revoke.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/revoke.c
fs/mbcache.c
fs/namei.c
fs/namespace.c
fs/nilfs2/super.c
fs/open.c
fs/pnode.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/super.c
fs/ufs/balloc.c
fs/ufs/ialloc.c
fs/ufs/truncate.c
fs/ufs/util.c
fs/ufs/util.h
include/asm-generic/syscalls.h
include/linux/buffer_head.h
include/linux/fs.h
include/linux/fs_struct.h
include/linux/lglock.h [new file with mode: 0644]
include/linux/spi/spi.h
include/linux/tty.h
include/sound/emu10k1.h
kernel/fork.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_functions_graph.c
lib/Kconfig.debug
scripts/recordmcount.pl
security/apparmor/path.c
security/selinux/hooks.c
sound/core/pcm_native.c
sound/pci/emu10k1/emu10k1.c
sound/pci/emu10k1/emupcm.c
sound/pci/emu10k1/memory.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/riptide/riptide.c
sound/soc/codecs/wm8776.c
tools/perf/Makefile
tools/perf/util/ui/browsers/annotate.c

index 568fa08e82e54d03322810eab5bb26ae192f2065..302db5da49b37812eb19bf79ea0e2952e5f3a21f 100644 (file)
@@ -49,40 +49,13 @@ Table of Contents
       f) MDIO on GPIOs
       g) SPI busses
 
-  VII - Marvell Discovery mv64[345]6x System Controller chips
-    1) The /system-controller node
-    2) Child nodes of /system-controller
-      a) Marvell Discovery MDIO bus
-      b) Marvell Discovery ethernet controller
-      c) Marvell Discovery PHY nodes
-      d) Marvell Discovery SDMA nodes
-      e) Marvell Discovery BRG nodes
-      f) Marvell Discovery CUNIT nodes
-      g) Marvell Discovery MPSCROUTING nodes
-      h) Marvell Discovery MPSCINTR nodes
-      i) Marvell Discovery MPSC nodes
-      j) Marvell Discovery Watch Dog Timer nodes
-      k) Marvell Discovery I2C nodes
-      l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes
-      m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes
-      n) Marvell Discovery GPP (General Purpose Pins) nodes
-      o) Marvell Discovery PCI host bridge node
-      p) Marvell Discovery CPU Error nodes
-      q) Marvell Discovery SRAM Controller nodes
-      r) Marvell Discovery PCI Error Handler nodes
-      s) Marvell Discovery Memory Controller nodes
-
-  VIII - Specifying interrupt information for devices
+  VII - Specifying interrupt information for devices
     1) interrupts property
     2) interrupt-parent property
     3) OpenPIC Interrupt Controllers
     4) ISA Interrupt Controllers
 
-  IX - Specifying GPIO information for devices
-    1) gpios property
-    2) gpio-controller nodes
-
-  X - Specifying device power management information (sleep property)
+  VIII - Specifying device power management information (sleep property)
 
   Appendix A - Sample SOC node for MPC8540
 
index 3958726664bad268c0e185a462f6859bfe3e67f5..492fee8a1ab2e6491f52c961b1a93017eef49a41 100644 (file)
@@ -1,6 +1,4 @@
 /*
- *  linux/include/asm-m68k/ide.h
- *
  *  Copyright (C) 1994-1996  Linus Torvalds & authors
  */
 
@@ -34,6 +32,8 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 
+#ifdef CONFIG_MMU
+
 /*
  * Get rid of defs from io.h - ide has its private and conflicting versions
  * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we
 #define __ide_mm_outsw(port, addr, n)  raw_outsw((u16 *)port, addr, n)
 #define __ide_mm_outsl(port, addr, n)  raw_outsl((u32 *)port, addr, n)
 
+#else
+
+#define __ide_mm_insw(port, addr, n)   io_insw((unsigned int)port, addr, n)
+#define __ide_mm_insl(port, addr, n)   io_insl((unsigned int)port, addr, n)
+#define __ide_mm_outsw(port, addr, n)  io_outsw((unsigned int)port, addr, n)
+#define __ide_mm_outsl(port, addr, n)  io_outsl((unsigned int)port, addr, n)
+
+#endif /* CONFIG_MMU */
+
 #endif /* __KERNEL__ */
 #endif /* _M68K_IDE_H */
index 4d090d3c08971dbeb45c2258d2d2d60805219b59..6d3390590e5ba24be497b5c4a42d0327dbae6cf9 100644 (file)
@@ -316,14 +316,14 @@ void dump(struct pt_regs *fp)
                fp->d0, fp->d1, fp->d2, fp->d3);
        printk(KERN_EMERG "d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
                fp->d4, fp->d5, fp->a0, fp->a1);
-       printk(KERN_EMERG "\nUSP: %08x   TRAPFRAME: %08x\n",
-               (unsigned int) rdusp(), (unsigned int) fp);
+       printk(KERN_EMERG "\nUSP: %08x   TRAPFRAME: %p\n",
+               (unsigned int) rdusp(), fp);
 
        printk(KERN_EMERG "\nCODE:");
        tp = ((unsigned char *) fp->pc) - 0x20;
        for (sp = (unsigned long *) tp, i = 0; (i < 0x40);  i += 4) {
                if ((i % 0x10) == 0)
-                       printk(KERN_EMERG "%08x: ", (int) (tp + i));
+                       printk(KERN_EMERG "%p: ", tp + i);
                printk("%08x ", (int) *sp++);
        }
        printk(KERN_EMERG "\n");
@@ -332,7 +332,7 @@ void dump(struct pt_regs *fp)
        tp = ((unsigned char *) fp) - 0x40;
        for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
                if ((i % 0x10) == 0)
-                       printk(KERN_EMERG "%08x: ", (int) (tp + i));
+                       printk(KERN_EMERG "%p: ", tp + i);
                printk("%08x ", (int) *sp++);
        }
        printk(KERN_EMERG "\n");
@@ -341,7 +341,7 @@ void dump(struct pt_regs *fp)
        tp = (unsigned char *) (rdusp() - 0x10);
        for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
                if ((i % 0x10) == 0)
-                       printk(KERN_EMERG "%08x: ", (int) (tp + i));
+                       printk(KERN_EMERG "%p: ", tp + i);
                printk("%08x ", (int) *sp++);
        }
        printk(KERN_EMERG "\n");
index d33ba17601fa20d61c86c3ea982aab72ca6508e1..99d9b61cccb592cb34985bf79fb777b0521a36bf 100644 (file)
@@ -73,7 +73,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
                /* We can only get here if we hit a P2P bridge with no node,
                 * let's do standard swizzling and try again
                 */
-               lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+               lspec = pci_swizzle_interrupt_pin(pdev, lspec);
                pdev = ppdev;
        }
 
index 23be25fec4d67bf7e48d612b50e1da9f766371ae..55ef532f32be6fc3c174e1280a7971ffc5329406 100644 (file)
 #include <linux/irq.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/byteorder.h>
 
@@ -1077,7 +1078,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
                struct dev_archdata *sd = &dev->dev.archdata;
 
                /* Setup OF node pointer in archdata */
-               sd->of_node = pci_device_to_OF_node(dev);
+               dev->dev.of_node = pci_device_to_OF_node(dev);
 
                /* Fixup NUMA node as it may not be setup yet by the generic
                 * code and is needed by the DMA init
index 7869a41b0f94cadff95ad17dc4eaf5a713b536c3..0687a42a5bd475166afed6e816c721a51517b4c9 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/ioport.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/pci.h>
 #include <asm/io.h>
 
index ea24c1e51be221e167ebc7fc7c2bd2658aff8e58..2673a3d1480654ceec39f2ab144b15cde72ba72e 100644 (file)
@@ -1588,7 +1588,7 @@ static const struct ata_port_info sata_dwc_port_info[] = {
        },
 };
 
-static int sata_dwc_probe(struct of_device *ofdev,
+static int sata_dwc_probe(struct platform_device *ofdev,
                        const struct of_device_id *match)
 {
        struct sata_dwc_device *hsdev;
@@ -1702,7 +1702,7 @@ error_out:
        return err;
 }
 
-static int sata_dwc_remove(struct of_device *ofdev)
+static int sata_dwc_remove(struct platform_device *ofdev)
 {
        struct device *dev = &ofdev->dev;
        struct ata_host *host = dev_get_drvdata(dev);
index 2982b3ee9465d1521a6296186c52be9c3a8dc462..057413bb16e294d20d476d49ea4770b56a8e986c 100644 (file)
@@ -94,6 +94,7 @@
 #include <linux/hdreg.h>
 #include <linux/platform_device.h>
 #if defined(CONFIG_OF)
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #endif
index ad46eae1f9bb207847fd8ac2bf80b58d4f3922d8..c350d01716bdace6ef510809e964a57c7129134b 100644 (file)
@@ -675,8 +675,8 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        }
 
        set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-       filp->private_data = tty;
-       file_move(filp, &tty->tty_files);
+
+       tty_add_file(tty, filp);
 
        retval = devpts_pty_new(inode, tty->link);
        if (retval)
index 0350c42375a217c0337cdce4855fb9ad54a9455c..949067a0bd4743151515382b07ccf7aecad11316 100644 (file)
@@ -136,6 +136,9 @@ LIST_HEAD(tty_drivers);                     /* linked list of tty drivers */
 DEFINE_MUTEX(tty_mutex);
 EXPORT_SYMBOL(tty_mutex);
 
+/* Spinlock to protect the tty->tty_files list */
+DEFINE_SPINLOCK(tty_files_lock);
+
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
 static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
 ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -185,6 +188,41 @@ void free_tty_struct(struct tty_struct *tty)
        kfree(tty);
 }
 
+static inline struct tty_struct *file_tty(struct file *file)
+{
+       return ((struct tty_file_private *)file->private_data)->tty;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+       struct tty_file_private *priv;
+
+       /* XXX: must implement proper error handling in callers */
+       priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL);
+
+       priv->tty = tty;
+       priv->file = file;
+       file->private_data = priv;
+
+       spin_lock(&tty_files_lock);
+       list_add(&priv->list, &tty->tty_files);
+       spin_unlock(&tty_files_lock);
+}
+
+/* Delete file from its tty */
+void tty_del_file(struct file *file)
+{
+       struct tty_file_private *priv = file->private_data;
+
+       spin_lock(&tty_files_lock);
+       list_del(&priv->list);
+       spin_unlock(&tty_files_lock);
+       file->private_data = NULL;
+       kfree(priv);
+}
+
+
 #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
 
 /**
@@ -235,11 +273,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
        struct list_head *p;
        int count = 0;
 
-       file_list_lock();
+       spin_lock(&tty_files_lock);
        list_for_each(p, &tty->tty_files) {
                count++;
        }
-       file_list_unlock();
+       spin_unlock(&tty_files_lock);
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
            tty->driver->subtype == PTY_TYPE_SLAVE &&
            tty->link && tty->link->count)
@@ -497,6 +535,7 @@ void __tty_hangup(struct tty_struct *tty)
        struct file *cons_filp = NULL;
        struct file *filp, *f = NULL;
        struct task_struct *p;
+       struct tty_file_private *priv;
        int    closecount = 0, n;
        unsigned long flags;
        int refs = 0;
@@ -506,7 +545,7 @@ void __tty_hangup(struct tty_struct *tty)
 
 
        spin_lock(&redirect_lock);
-       if (redirect && redirect->private_data == tty) {
+       if (redirect && file_tty(redirect) == tty) {
                f = redirect;
                redirect = NULL;
        }
@@ -519,9 +558,10 @@ void __tty_hangup(struct tty_struct *tty)
           workqueue with the lock held */
        check_tty_count(tty, "tty_hangup");
 
-       file_list_lock();
+       spin_lock(&tty_files_lock);
        /* This breaks for file handles being sent over AF_UNIX sockets ? */
-       list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
+       list_for_each_entry(priv, &tty->tty_files, list) {
+               filp = priv->file;
                if (filp->f_op->write == redirected_tty_write)
                        cons_filp = filp;
                if (filp->f_op->write != tty_write)
@@ -530,7 +570,7 @@ void __tty_hangup(struct tty_struct *tty)
                __tty_fasync(-1, filp, 0);      /* can't block */
                filp->f_op = &hung_up_tty_fops;
        }
-       file_list_unlock();
+       spin_unlock(&tty_files_lock);
 
        tty_ldisc_hangup(tty);
 
@@ -889,12 +929,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
                        loff_t *ppos)
 {
        int i;
-       struct tty_struct *tty;
-       struct inode *inode;
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
 
-       tty = file->private_data;
-       inode = file->f_path.dentry->d_inode;
        if (tty_paranoia_check(tty, inode, "tty_read"))
                return -EIO;
        if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
@@ -1065,12 +1103,11 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 static ssize_t tty_write(struct file *file, const char __user *buf,
                                                size_t count, loff_t *ppos)
 {
-       struct tty_struct *tty;
        struct inode *inode = file->f_path.dentry->d_inode;
+       struct tty_struct *tty = file_tty(file);
+       struct tty_ldisc *ld;
        ssize_t ret;
-       struct tty_ldisc *ld;
 
-       tty = file->private_data;
        if (tty_paranoia_check(tty, inode, "tty_write"))
                return -EIO;
        if (!tty || !tty->ops->write ||
@@ -1424,9 +1461,9 @@ static void release_one_tty(struct work_struct *work)
        tty_driver_kref_put(driver);
        module_put(driver->owner);
 
-       file_list_lock();
+       spin_lock(&tty_files_lock);
        list_del_init(&tty->tty_files);
-       file_list_unlock();
+       spin_unlock(&tty_files_lock);
 
        put_pid(tty->pgrp);
        put_pid(tty->session);
@@ -1507,13 +1544,13 @@ static void release_tty(struct tty_struct *tty, int idx)
 
 int tty_release(struct inode *inode, struct file *filp)
 {
-       struct tty_struct *tty, *o_tty;
+       struct tty_struct *tty = file_tty(filp);
+       struct tty_struct *o_tty;
        int     pty_master, tty_closing, o_tty_closing, do_sleep;
        int     devpts;
        int     idx;
        char    buf[64];
 
-       tty = filp->private_data;
        if (tty_paranoia_check(tty, inode, "tty_release_dev"))
                return 0;
 
@@ -1671,8 +1708,7 @@ int tty_release(struct inode *inode, struct file *filp)
         *  - do_tty_hangup no longer sees this file descriptor as
         *    something that needs to be handled for hangups.
         */
-       file_kill(filp);
-       filp->private_data = NULL;
+       tty_del_file(filp);
 
        /*
         * Perform some housekeeping before deciding whether to return.
@@ -1839,8 +1875,8 @@ got_driver:
                return PTR_ERR(tty);
        }
 
-       filp->private_data = tty;
-       file_move(filp, &tty->tty_files);
+       tty_add_file(tty, filp);
+
        check_tty_count(tty, "tty_open");
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
            tty->driver->subtype == PTY_TYPE_MASTER)
@@ -1916,11 +1952,10 @@ got_driver:
 
 static unsigned int tty_poll(struct file *filp, poll_table *wait)
 {
-       struct tty_struct *tty;
+       struct tty_struct *tty = file_tty(filp);
        struct tty_ldisc *ld;
        int ret = 0;
 
-       tty = filp->private_data;
        if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
                return 0;
 
@@ -1933,11 +1968,10 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
 
 static int __tty_fasync(int fd, struct file *filp, int on)
 {
-       struct tty_struct *tty;
+       struct tty_struct *tty = file_tty(filp);
        unsigned long flags;
        int retval = 0;
 
-       tty = filp->private_data;
        if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
                goto out;
 
@@ -2491,13 +2525,13 @@ EXPORT_SYMBOL(tty_pair_get_pty);
  */
 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct tty_struct *tty, *real_tty;
+       struct tty_struct *tty = file_tty(file);
+       struct tty_struct *real_tty;
        void __user *p = (void __user *)arg;
        int retval;
        struct tty_ldisc *ld;
        struct inode *inode = file->f_dentry->d_inode;
 
-       tty = file->private_data;
        if (tty_paranoia_check(tty, inode, "tty_ioctl"))
                return -EINVAL;
 
@@ -2619,7 +2653,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
 {
        struct inode *inode = file->f_dentry->d_inode;
-       struct tty_struct *tty = file->private_data;
+       struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
        int retval = -ENOIOCTLCMD;
 
@@ -2711,7 +2745,7 @@ void __do_SAK(struct tty_struct *tty)
                                if (!filp)
                                        continue;
                                if (filp->f_op->read == tty_read &&
-                                   filp->private_data == tty) {
+                                   file_tty(filp) == tty) {
                                        printk(KERN_NOTICE "SAK: killed process %d"
                                            " (%s): fd#%d opened to the tty\n",
                                            task_pid_nr(p), p->comm, i);
index 0ed763cd2e77499471bc6f4abc0da36e199ec3dd..b663d573aad99ed5257f9ab324d566309e70ac37 100644 (file)
@@ -94,6 +94,7 @@
 
 #ifdef CONFIG_OF
 /* For open firmware. */
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #endif
index 11567c7999a243d3d95ebecdb623c26de49705ce..c148b630215484f9689bf9257d6acf286685c37a 100644 (file)
@@ -2136,16 +2136,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
         * with the rest of the array)
         */
        mdk_rdev_t *rdev;
-
-       /* First make sure individual recovery_offsets are correct */
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
-               if (rdev->raid_disk >= 0 &&
-                   mddev->delta_disks >= 0 &&
-                   !test_bit(In_sync, &rdev->flags) &&
-                   mddev->curr_resync_completed > rdev->recovery_offset)
-                               rdev->recovery_offset = mddev->curr_resync_completed;
-
-       }       
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                if (rdev->sb_events == mddev->events ||
                    (nospares &&
@@ -2167,12 +2157,27 @@ static void md_update_sb(mddev_t * mddev, int force_change)
        int sync_req;
        int nospares = 0;
 
-       mddev->utime = get_seconds();
-       if (mddev->external)
-               return;
 repeat:
+       /* First make sure individual recovery_offsets are correct */
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
+               if (rdev->raid_disk >= 0 &&
+                   mddev->delta_disks >= 0 &&
+                   !test_bit(In_sync, &rdev->flags) &&
+                   mddev->curr_resync_completed > rdev->recovery_offset)
+                               rdev->recovery_offset = mddev->curr_resync_completed;
+
+       }       
+       if (mddev->external || !mddev->persistent) {
+               clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+               clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               wake_up(&mddev->sb_wait);
+               return;
+       }
+
        spin_lock_irq(&mddev->write_lock);
 
+       mddev->utime = get_seconds();
+
        set_bit(MD_CHANGE_PENDING, &mddev->flags);
        if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
                force_change = 1;
@@ -2221,19 +2226,6 @@ repeat:
                MD_BUG();
                mddev->events --;
        }
-
-       /*
-        * do not write anything to disk if using
-        * nonpersistent superblocks
-        */
-       if (!mddev->persistent) {
-               if (!mddev->external)
-                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-
-               spin_unlock_irq(&mddev->write_lock);
-               wake_up(&mddev->sb_wait);
-               return;
-       }
        sync_sbs(mddev, nospares);
        spin_unlock_irq(&mddev->write_lock);
 
index 73cc74ffc26bd5eefb7166aa37130e4c47daa56a..ad83a4dcadc3ed7cafa914d2e4dcb7ef1a939fdf 100644 (file)
@@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        struct bio_list bl;
        struct page **behind_pages = NULL;
        const int rw = bio_data_dir(bio);
-       const bool do_sync = (bio->bi_rw & REQ_SYNC);
-       bool do_barriers;
+       const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
+       unsigned long do_barriers;
        mdk_rdev_t *blocked_rdev;
 
        /*
@@ -1120,6 +1120,8 @@ static int raid1_spare_active(mddev_t *mddev)
 {
        int i;
        conf_t *conf = mddev->private;
+       int count = 0;
+       unsigned long flags;
 
        /*
         * Find all failed disks within the RAID1 configuration 
@@ -1131,15 +1133,16 @@ static int raid1_spare_active(mddev_t *mddev)
                if (rdev
                    && !test_bit(Faulty, &rdev->flags)
                    && !test_and_set_bit(In_sync, &rdev->flags)) {
-                       unsigned long flags;
-                       spin_lock_irqsave(&conf->device_lock, flags);
-                       mddev->degraded--;
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       count++;
+                       sysfs_notify_dirent(rdev->sysfs_state);
                }
        }
+       spin_lock_irqsave(&conf->device_lock, flags);
+       mddev->degraded -= count;
+       spin_unlock_irqrestore(&conf->device_lock, flags);
 
        print_conf(conf);
-       return 0;
+       return count;
 }
 
 
@@ -1640,7 +1643,7 @@ static void raid1d(mddev_t *mddev)
                         * We already have a nr_pending reference on these rdevs.
                         */
                        int i;
-                       const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
+                       const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
                        clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
                        clear_bit(R1BIO_Barrier, &r1_bio->state);
                        for (i=0; i < conf->raid_disks; i++)
@@ -1696,7 +1699,7 @@ static void raid1d(mddev_t *mddev)
                                       (unsigned long long)r1_bio->sector);
                                raid_end_bio_io(r1_bio);
                        } else {
-                               const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
+                               const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
                                r1_bio->bios[r1_bio->read_disk] =
                                        mddev->ro ? IO_BLOCKED : NULL;
                                r1_bio->read_disk = disk;
index a88aeb5198c76a6c3a5ed58693d71f751ae975a7..84718383124d665f2c9382f5149d99773acde408 100644 (file)
@@ -799,7 +799,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        int i;
        int chunk_sects = conf->chunk_mask + 1;
        const int rw = bio_data_dir(bio);
-       const bool do_sync = (bio->bi_rw & REQ_SYNC);
+       const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        struct bio_list bl;
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
@@ -1116,6 +1116,8 @@ static int raid10_spare_active(mddev_t *mddev)
        int i;
        conf_t *conf = mddev->private;
        mirror_info_t *tmp;
+       int count = 0;
+       unsigned long flags;
 
        /*
         * Find all non-in_sync disks within the RAID10 configuration
@@ -1126,15 +1128,16 @@ static int raid10_spare_active(mddev_t *mddev)
                if (tmp->rdev
                    && !test_bit(Faulty, &tmp->rdev->flags)
                    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
-                       unsigned long flags;
-                       spin_lock_irqsave(&conf->device_lock, flags);
-                       mddev->degraded--;
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       count++;
+                       sysfs_notify_dirent(tmp->rdev->sysfs_state);
                }
        }
+       spin_lock_irqsave(&conf->device_lock, flags);
+       mddev->degraded -= count;
+       spin_unlock_irqrestore(&conf->device_lock, flags);
 
        print_conf(conf);
-       return 0;
+       return count;
 }
 
 
@@ -1734,7 +1737,7 @@ static void raid10d(mddev_t *mddev)
                                raid_end_bio_io(r10_bio);
                                bio_put(bio);
                        } else {
-                               const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+                               const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
                                bio_put(bio);
                                rdev = conf->mirrors[mirror].rdev;
                                if (printk_ratelimit())
index 866d4b5a144c465daf21e439b9b0e6ef36571d6a..69b0a169e43d483094200d88cd7d4e5ae05e9d19 100644 (file)
@@ -5330,6 +5330,8 @@ static int raid5_spare_active(mddev_t *mddev)
        int i;
        raid5_conf_t *conf = mddev->private;
        struct disk_info *tmp;
+       int count = 0;
+       unsigned long flags;
 
        for (i = 0; i < conf->raid_disks; i++) {
                tmp = conf->disks + i;
@@ -5337,14 +5339,15 @@ static int raid5_spare_active(mddev_t *mddev)
                    && tmp->rdev->recovery_offset == MaxSector
                    && !test_bit(Faulty, &tmp->rdev->flags)
                    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
-                       unsigned long flags;
-                       spin_lock_irqsave(&conf->device_lock, flags);
-                       mddev->degraded--;
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       count++;
+                       sysfs_notify_dirent(tmp->rdev->sysfs_state);
                }
        }
+       spin_lock_irqsave(&conf->device_lock, flags);
+       mddev->degraded -= count;
+       spin_unlock_irqrestore(&conf->device_lock, flags);
        print_raid5_conf(conf);
-       return 0;
+       return count;
 }
 
 static int raid5_remove_disk(mddev_t *mddev, int number)
index 0efe631e50cab2ddda5e6a98022e5b8e49f46705..d80cfdc8edd2663841894348fa33e7dc4e597b1e 100644 (file)
@@ -86,7 +86,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        init_waitqueue_head(&host->wq);
        INIT_DELAYED_WORK(&host->detect, mmc_rescan);
        INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
+#ifdef CONFIG_PM
        host->pm_notify.notifier_call = mmc_pm_notify;
+#endif
 
        /*
         * By default, hosts do not support SGIO or large requests.
index 00af55d7afba60785b61b011814aba2e46a6f0df..fe63f6bd663c1f7a5ac8db57a0014c1c4a019da0 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/concat.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 
index 659a695bdad6474e832c9d4ffbef7648fe45c4d5..2af8fd1131234be29fd612b207da23543c78870b 100644 (file)
 #include <linux/slab.h>
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/nwpserial.h>
 
-#include <asm/prom.h>
-
 struct of_serial_info {
        int type;
        int line;
index 59be3efe063621e2fffb7b576e8ea0d8daa10b32..052b3c7fa6a0f644d26c613e67e69ffe00ad05be 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/errno.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/io.h>
index cdc4dd50d638a51abc240c780989b4433856b27c..8ec83d2dffb75ec23c67d35cce2c5ee272943c1a 100644 (file)
@@ -44,9 +44,9 @@ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int le
                return -ENOENT;
        }
 
-       read_lock(&current->fs->lock);
+       spin_lock(&current->fs->lock);
        path.mnt = mntget(current->fs->root.mnt);
-       read_unlock(&current->fs->lock);
+       spin_unlock(&current->fs->lock);
 
        path.dentry = d;
 
@@ -91,9 +91,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi)
                return -ENOENT;
        }
 
-       read_lock(&current->fs->lock);
+       spin_lock(&current->fs->lock);
        root = dget(current->fs->root.dentry);
-       read_unlock(&current->fs->lock);
+       spin_unlock(&current->fs->lock);
 
        spin_lock(&dcache_lock);
 
index 50efa339e051f7b7a5d417160ff528ca94e3adfa..3e7dca279d1c0dff3fdb7e0e2e7d8d236af7d4c9 100644 (file)
@@ -770,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
-                                * ll_rw_block() actually writes the current
-                                * contents - it is a noop if I/O is still in
-                                * flight on potentially older contents.
+                                * write_dirty_buffer() actually writes the
+                                * current contents - it is a noop if I/O is
+                                * still in flight on potentially older
+                                * contents.
                                 */
-                               ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+                               write_dirty_buffer(bh, WRITE_SYNC_PLUG);
 
                                /*
                                 * Kick off IO for the previous mapping. Note
@@ -2911,13 +2912,6 @@ int submit_bh(int rw, struct buffer_head * bh)
        BUG_ON(buffer_delay(bh));
        BUG_ON(buffer_unwritten(bh));
 
-       /*
-        * Mask in barrier bit for a write (could be either a WRITE or a
-        * WRITE_SYNC
-        */
-       if (buffer_ordered(bh) && (rw & WRITE))
-               rw |= WRITE_BARRIER;
-
        /*
         * Only clear out a write error when rewriting
         */
@@ -2956,22 +2950,21 @@ EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
- * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
- * are sent to disk. The fourth %READA option is described in the documentation
- * for generic_make_request() which ll_rw_block() calls.
+ * %READA option is described in the documentation for generic_make_request()
+ * which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing read request.  Further it marks as clean buffers that
- * are processed for writing (the buffer cache won't assume that they are
- * actually clean until the buffer gets unlocked).
+ * BH_Lock state bit), any buffer that appears to be clean when doing a write
+ * request, and any buffer that appears to be up-to-date when doing read
+ * request.  Further it marks as clean buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean
+ * until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2987,20 +2980,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
        for (i = 0; i < nr; i++) {
                struct buffer_head *bh = bhs[i];
 
-               if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
-                       lock_buffer(bh);
-               else if (!trylock_buffer(bh))
+               if (!trylock_buffer(bh))
                        continue;
-
-               if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
-                   rw == SWRITE_SYNC_PLUG) {
+               if (rw == WRITE) {
                        if (test_clear_buffer_dirty(bh)) {
                                bh->b_end_io = end_buffer_write_sync;
                                get_bh(bh);
-                               if (rw == SWRITE_SYNC)
-                                       submit_bh(WRITE_SYNC, bh);
-                               else
-                                       submit_bh(WRITE, bh);
+                               submit_bh(WRITE, bh);
                                continue;
                        }
                } else {
@@ -3016,12 +3002,25 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 }
 EXPORT_SYMBOL(ll_rw_block);
 
+void write_dirty_buffer(struct buffer_head *bh, int rw)
+{
+       lock_buffer(bh);
+       if (!test_clear_buffer_dirty(bh)) {
+               unlock_buffer(bh);
+               return;
+       }
+       bh->b_end_io = end_buffer_write_sync;
+       get_bh(bh);
+       submit_bh(rw, bh);
+}
+EXPORT_SYMBOL(write_dirty_buffer);
+
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
  * and then start new I/O and then wait upon it.  The caller must have a ref on
  * the buffer_head.
  */
-int sync_dirty_buffer(struct buffer_head *bh)
+int __sync_dirty_buffer(struct buffer_head *bh, int rw)
 {
        int ret = 0;
 
@@ -3030,7 +3029,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_write_sync;
-               ret = submit_bh(WRITE_SYNC, bh);
+               ret = submit_bh(rw, bh);
                wait_on_buffer(bh);
                if (buffer_eopnotsupp(bh)) {
                        clear_buffer_eopnotsupp(bh);
@@ -3043,6 +3042,12 @@ int sync_dirty_buffer(struct buffer_head *bh)
        }
        return ret;
 }
+EXPORT_SYMBOL(__sync_dirty_buffer);
+
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+       return __sync_dirty_buffer(bh, WRITE_SYNC);
+}
 EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
index a53b130b366c738c654a7ec71a3e4b01ee6d8ec7..1e7a33028d33908807d776d05840637cfd7e4f22 100644 (file)
@@ -80,7 +80,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
                }
        } else {
                inode = iget_locked(sb, CRAMINO(cramfs_inode));
-               if (inode) {
+               if (inode && (inode->i_state & I_NEW)) {
                        setup_inode(inode, cramfs_inode);
                        unlock_new_inode(inode);
                }
index 4d13bf50b7b159774c592ce886c7fd9208372d2a..83293be4814965373d4c81e5b7d91bc63f3a55c6 100644 (file)
@@ -1332,31 +1332,13 @@ EXPORT_SYMBOL(d_add_ci);
  * d_lookup - search for a dentry
  * @parent: parent dentry
  * @name: qstr of name we wish to find
+ * Returns: dentry, or NULL
  *
- * Searches the children of the parent dentry for the name in question. If
- * the dentry is found its reference count is incremented and the dentry
- * is returned. The caller must use dput to free the entry when it has
- * finished using it. %NULL is returned on failure.
- *
- * __d_lookup is dcache_lock free. The hash list is protected using RCU.
- * Memory barriers are used while updating and doing lockless traversal. 
- * To avoid races with d_move while rename is happening, d_lock is used.
- *
- * Overflows in memcmp(), while d_move, are avoided by keeping the length
- * and name pointer in one structure pointed by d_qstr.
- *
- * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
- * lookup is going on.
- *
- * The dentry unused LRU is not updated even if lookup finds the required dentry
- * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
- * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
- * acquisition.
- *
- * d_lookup() is protected against the concurrent renames in some unrelated
- * directory using the seqlockt_t rename_lock.
+ * d_lookup searches the children of the parent dentry for the name in
+ * question. If the dentry is found its reference count is incremented and the
+ * dentry is returned. The caller must use dput to free the entry when it has
+ * finished using it. %NULL is returned if the dentry does not exist.
  */
-
 struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
 {
        struct dentry * dentry = NULL;
@@ -1372,6 +1354,21 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
 }
 EXPORT_SYMBOL(d_lookup);
 
+/*
+ * __d_lookup - search for a dentry (racy)
+ * @parent: parent dentry
+ * @name: qstr of name we wish to find
+ * Returns: dentry, or NULL
+ *
+ * __d_lookup is like d_lookup, however it may (rarely) return a
+ * false-negative result due to unrelated rename activity.
+ *
+ * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
+ * however it must be used carefully, eg. with a following d_lookup in
+ * the case of failure.
+ *
+ * __d_lookup callers must be commented.
+ */
 struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
 {
        unsigned int len = name->len;
@@ -1382,6 +1379,19 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
        struct hlist_node *node;
        struct dentry *dentry;
 
+       /*
+        * The hash list is protected using RCU.
+        *
+        * Take d_lock when comparing a candidate dentry, to avoid races
+        * with d_move().
+        *
+        * It is possible that concurrent renames can mess up our list
+        * walk here and result in missing our dentry, resulting in the
+        * false-negative result. d_lookup() protects against concurrent
+        * renames using rename_lock seqlock.
+        *
+        * See Documentation/filesystems/dcache-locking.txt for more details.
+        */
        rcu_read_lock();
        
        hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
@@ -1396,8 +1406,8 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
 
                /*
                 * Recheck the dentry after taking the lock - d_move may have
-                * changed things.  Don't bother checking the hash because we're
-                * about to compare the whole name anyway.
+                * changed things. Don't bother checking the hash because
+                * we're about to compare the whole name anyway.
                 */
                if (dentry->d_parent != parent)
                        goto next;
@@ -1925,7 +1935,7 @@ static int prepend_path(const struct path *path, struct path *root,
        bool slash = false;
        int error = 0;
 
-       spin_lock(&vfsmount_lock);
+       br_read_lock(vfsmount_lock);
        while (dentry != root->dentry || vfsmnt != root->mnt) {
                struct dentry * parent;
 
@@ -1954,7 +1964,7 @@ out:
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        return error;
 
 global_root:
@@ -2292,11 +2302,12 @@ int path_is_under(struct path *path1, struct path *path2)
        struct vfsmount *mnt = path1->mnt;
        struct dentry *dentry = path1->dentry;
        int res;
-       spin_lock(&vfsmount_lock);
+
+       br_read_lock(vfsmount_lock);
        if (mnt != path2->mnt) {
                for (;;) {
                        if (mnt->mnt_parent == mnt) {
-                               spin_unlock(&vfsmount_lock);
+                               br_read_unlock(vfsmount_lock);
                                return 0;
                        }
                        if (mnt->mnt_parent == path2->mnt)
@@ -2306,7 +2317,7 @@ int path_is_under(struct path *path1, struct path *path2)
                dentry = mnt->mnt_mountpoint;
        }
        res = is_subdir(dentry, path2->dentry);
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        return res;
 }
 EXPORT_SYMBOL(path_is_under);
index 05c7d6b84df7c770f07aa7148b9f47e6aacd060f..2d9455282744bce582e48e0ecec4f4a6d332a28c 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1118,7 +1118,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
        bprm->unsafe = tracehook_unsafe_exec(p);
 
        n_fs = 1;
-       write_lock(&p->fs->lock);
+       spin_lock(&p->fs->lock);
        rcu_read_lock();
        for (t = next_thread(p); t != p; t = next_thread(t)) {
                if (t->fs == p->fs)
@@ -1135,7 +1135,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
                        res = 1;
                }
        }
-       write_unlock(&p->fs->lock);
+       spin_unlock(&p->fs->lock);
 
        return res;
 }
index 1fa23f6ffba5b39a9921bc6f0b5eac0bfd46fecb..1736f23563888b2f0225a6add77d45c984270231 100644 (file)
@@ -250,7 +250,9 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
 {
        int i, err = 0;
 
-       ll_rw_block(SWRITE, nr_bhs, bhs);
+       for (i = 0; i < nr_bhs; i++)
+               write_dirty_buffer(bhs[i], WRITE);
+
        for (i = 0; i < nr_bhs; i++) {
                wait_on_buffer(bhs[i]);
                if (buffer_eopnotsupp(bhs[i])) {
index edecd36fed9bdcf7f49411a3bcf2f97283bd077b..a04bdd81c11ca3799d4429d8c5eb3579621e0628 100644 (file)
@@ -20,7 +20,9 @@
 #include <linux/cdev.h>
 #include <linux/fsnotify.h>
 #include <linux/sysctl.h>
+#include <linux/lglock.h>
 #include <linux/percpu_counter.h>
+#include <linux/percpu.h>
 #include <linux/ima.h>
 
 #include <asm/atomic.h>
@@ -32,8 +34,8 @@ struct files_stat_struct files_stat = {
        .max_files = NR_FILE
 };
 
-/* public. Not pretty! */
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+DECLARE_LGLOCK(files_lglock);
+DEFINE_LGLOCK(files_lglock);
 
 /* SLAB cache for file structures */
 static struct kmem_cache *filp_cachep __read_mostly;
@@ -249,7 +251,7 @@ static void __fput(struct file *file)
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
-       file_kill(file);
+       file_sb_list_del(file);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
@@ -328,41 +330,107 @@ struct file *fget_light(unsigned int fd, int *fput_needed)
        return file;
 }
 
-
 void put_filp(struct file *file)
 {
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
-               file_kill(file);
+               file_sb_list_del(file);
                file_free(file);
        }
 }
 
-void file_move(struct file *file, struct list_head *list)
+static inline int file_list_cpu(struct file *file)
 {
-       if (!list)
-               return;
-       file_list_lock();
-       list_move(&file->f_u.fu_list, list);
-       file_list_unlock();
+#ifdef CONFIG_SMP
+       return file->f_sb_list_cpu;
+#else
+       return smp_processor_id();
+#endif
+}
+
+/* helper for file_sb_list_add to reduce ifdefs */
+static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
+{
+       struct list_head *list;
+#ifdef CONFIG_SMP
+       int cpu;
+       cpu = smp_processor_id();
+       file->f_sb_list_cpu = cpu;
+       list = per_cpu_ptr(sb->s_files, cpu);
+#else
+       list = &sb->s_files;
+#endif
+       list_add(&file->f_u.fu_list, list);
 }
 
-void file_kill(struct file *file)
+/**
+ * file_sb_list_add - add a file to the sb's file list
+ * @file: file to add
+ * @sb: sb to add it to
+ *
+ * Use this function to associate a file with the superblock of the inode it
+ * refers to.
+ */
+void file_sb_list_add(struct file *file, struct super_block *sb)
+{
+       lg_local_lock(files_lglock);
+       __file_sb_list_add(file, sb);
+       lg_local_unlock(files_lglock);
+}
+
+/**
+ * file_sb_list_del - remove a file from the sb's file list
+ * @file: file to remove
+ *
+ * Use this function to remove a file from its superblock; the
+ * superblock is derived from the file itself.
+ */
+void file_sb_list_del(struct file *file)
 {
        if (!list_empty(&file->f_u.fu_list)) {
-               file_list_lock();
+               lg_local_lock_cpu(files_lglock, file_list_cpu(file));
                list_del_init(&file->f_u.fu_list);
-               file_list_unlock();
+               lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
        }
 }
 
+#ifdef CONFIG_SMP
+
+/*
+ * These macros iterate all files on all CPUs for a given superblock.
+ * files_lglock must be held globally.
+ */
+#define do_file_list_for_each_entry(__sb, __file)              \
+{                                                              \
+       int i;                                                  \
+       for_each_possible_cpu(i) {                              \
+               struct list_head *list;                         \
+               list = per_cpu_ptr((__sb)->s_files, i);         \
+               list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry                         \
+       }                                                       \
+}
+
+#else
+
+#define do_file_list_for_each_entry(__sb, __file)              \
+{                                                              \
+       struct list_head *list;                                 \
+       list = &(__sb)->s_files;                                \
+       list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry                         \
+}
+
+#endif
+
 int fs_may_remount_ro(struct super_block *sb)
 {
        struct file *file;
-
        /* Check that no files are currently opened for writing. */
-       file_list_lock();
-       list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
+       lg_global_lock(files_lglock);
+       do_file_list_for_each_entry(sb, file) {
                struct inode *inode = file->f_path.dentry->d_inode;
 
                /* File with pending delete? */
@@ -372,11 +440,11 @@ int fs_may_remount_ro(struct super_block *sb)
                /* Writeable file? */
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
                        goto too_bad;
-       }
-       file_list_unlock();
+       } while_file_list_for_each_entry;
+       lg_global_unlock(files_lglock);
        return 1; /* Tis' cool bro. */
 too_bad:
-       file_list_unlock();
+       lg_global_unlock(files_lglock);
        return 0;
 }
 
@@ -392,8 +460,8 @@ void mark_files_ro(struct super_block *sb)
        struct file *f;
 
 retry:
-       file_list_lock();
-       list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
+       lg_global_lock(files_lglock);
+       do_file_list_for_each_entry(sb, f) {
                struct vfsmount *mnt;
                if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
                       continue;
@@ -408,16 +476,13 @@ retry:
                        continue;
                file_release_write(f);
                mnt = mntget(f->f_path.mnt);
-               file_list_unlock();
-               /*
-                * This can sleep, so we can't hold
-                * the file_list_lock() spinlock.
-                */
+               /* This can sleep, so we can't hold the spinlock. */
+               lg_global_unlock(files_lglock);
                mnt_drop_write(mnt);
                mntput(mnt);
                goto retry;
-       }
-       file_list_unlock();
+       } while_file_list_for_each_entry;
+       lg_global_unlock(files_lglock);
 }
 
 void __init files_init(unsigned long mempages)
@@ -437,5 +502,6 @@ void __init files_init(unsigned long mempages)
        if (files_stat.max_files < NR_FILE)
                files_stat.max_files = NR_FILE;
        files_defer_init();
+       lg_lock_init(files_lglock);
        percpu_counter_init(&nr_files, 0);
 } 
index 1ee40eb9a2c05cc1751828af612c6942758b4c72..ed45a9cf5f3de46ae08e90d581a500bed9eb4dbf 100644 (file)
@@ -13,11 +13,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
 {
        struct path old_root;
 
-       write_lock(&fs->lock);
+       spin_lock(&fs->lock);
        old_root = fs->root;
        fs->root = *path;
        path_get(path);
-       write_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
        if (old_root.dentry)
                path_put(&old_root);
 }
@@ -30,11 +30,11 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
 {
        struct path old_pwd;
 
-       write_lock(&fs->lock);
+       spin_lock(&fs->lock);
        old_pwd = fs->pwd;
        fs->pwd = *path;
        path_get(path);
-       write_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
 
        if (old_pwd.dentry)
                path_put(&old_pwd);
@@ -51,7 +51,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
                task_lock(p);
                fs = p->fs;
                if (fs) {
-                       write_lock(&fs->lock);
+                       spin_lock(&fs->lock);
                        if (fs->root.dentry == old_root->dentry
                            && fs->root.mnt == old_root->mnt) {
                                path_get(new_root);
@@ -64,7 +64,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
                                fs->pwd = *new_root;
                                count++;
                        }
-                       write_unlock(&fs->lock);
+                       spin_unlock(&fs->lock);
                }
                task_unlock(p);
        } while_each_thread(g, p);
@@ -87,10 +87,10 @@ void exit_fs(struct task_struct *tsk)
        if (fs) {
                int kill;
                task_lock(tsk);
-               write_lock(&fs->lock);
+               spin_lock(&fs->lock);
                tsk->fs = NULL;
                kill = !--fs->users;
-               write_unlock(&fs->lock);
+               spin_unlock(&fs->lock);
                task_unlock(tsk);
                if (kill)
                        free_fs_struct(fs);
@@ -104,7 +104,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
        if (fs) {
                fs->users = 1;
                fs->in_exec = 0;
-               rwlock_init(&fs->lock);
+               spin_lock_init(&fs->lock);
                fs->umask = old->umask;
                get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
        }
@@ -121,10 +121,10 @@ int unshare_fs_struct(void)
                return -ENOMEM;
 
        task_lock(current);
-       write_lock(&fs->lock);
+       spin_lock(&fs->lock);
        kill = !--fs->users;
        current->fs = new_fs;
-       write_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
        task_unlock(current);
 
        if (kill)
@@ -143,7 +143,7 @@ EXPORT_SYMBOL(current_umask);
 /* to be mentioned only in INIT_TASK */
 struct fs_struct init_fs = {
        .users          = 1,
-       .lock           = __RW_LOCK_UNLOCKED(init_fs.lock),
+       .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
        .umask          = 0022,
 };
 
@@ -156,14 +156,14 @@ void daemonize_fs_struct(void)
 
                task_lock(current);
 
-               write_lock(&init_fs.lock);
+               spin_lock(&init_fs.lock);
                init_fs.users++;
-               write_unlock(&init_fs.lock);
+               spin_unlock(&init_fs.lock);
 
-               write_lock(&fs->lock);
+               spin_lock(&fs->lock);
                current->fs = &init_fs;
                kill = !--fs->users;
-               write_unlock(&fs->lock);
+               spin_unlock(&fs->lock);
 
                task_unlock(current);
                if (kill)
index 99800e564157ed5d3bb78b6e80c6d7f5b7d32051..6bc9e3a5a693b0fa2d5d8f09349f42b9d44ea72f 100644 (file)
@@ -94,6 +94,7 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
                        if (error < 0)
                                goto failed;
                        inode->i_mode = mode;
+                       inode->i_ctime = CURRENT_TIME;
                        if (error == 0) {
                                posix_acl_release(acl);
                                acl = NULL;
index dd1e55535a4e8a65e4405a2419d1a9bcde389f8c..f7dc9b5f9ef8c80560cb545d937aa9818a3b646f 100644 (file)
@@ -104,7 +104,7 @@ static char *__dentry_name(struct dentry *dentry, char *name)
                __putname(name);
                return NULL;
        }
-       strncpy(name, root, PATH_MAX);
+       strlcpy(name, root, PATH_MAX);
        if (len > p - name) {
                __putname(name);
                return NULL;
@@ -876,7 +876,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
                char *path = dentry_name(dentry);
                int err = -ENOMEM;
                if (path) {
-                       int err = hostfs_do_readlink(path, link, PATH_MAX);
+                       err = hostfs_do_readlink(path, link, PATH_MAX);
                        if (err == PATH_MAX)
                                err = -E2BIG;
                        __putname(path);
index 6b706bc60a66bbb3cecb7477f2ea72e0a091aa84..a6910e91cee8799196e991c7cab98ad11206cf01 100644 (file)
@@ -9,6 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/lglock.h>
+
 struct super_block;
 struct linux_binprm;
 struct path;
@@ -70,7 +72,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
 
 extern void __init mnt_init(void);
 
-extern spinlock_t vfsmount_lock;
+DECLARE_BRLOCK(vfsmount_lock);
+
 
 /*
  * fs_struct.c
@@ -80,6 +83,8 @@ extern void chroot_fs_refs(struct path *, struct path *);
 /*
  * file_table.c
  */
+extern void file_sb_list_add(struct file *f, struct super_block *sb);
+extern void file_sb_list_del(struct file *f);
 extern void mark_files_ro(struct super_block *);
 extern struct file *get_empty_filp(void);
 
index b0435dd0654d16acce790332fbb258ed378a14d3..05a38b9c4c0ecbe749ef73931933c0e089fe15ba 100644 (file)
@@ -254,7 +254,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
 {
        int i;
 
-       ll_rw_block(SWRITE, *batch_count, bhs);
+       for (i = 0; i < *batch_count; i++)
+               write_dirty_buffer(bhs[i], WRITE);
+
        for (i = 0; i < *batch_count; i++) {
                struct buffer_head *bh = bhs[i];
                clear_buffer_jwrite(bh);
index 28a9ddaa0c496f85625ccab2d543fc5e0886872b..95d8c11c929ea3563b72f69e130674d6f49d5162 100644 (file)
@@ -119,7 +119,6 @@ static int journal_write_commit_record(journal_t *journal,
        struct buffer_head *bh;
        journal_header_t *header;
        int ret;
-       int barrier_done = 0;
 
        if (is_journal_aborted(journal))
                return 0;
@@ -137,34 +136,36 @@ static int journal_write_commit_record(journal_t *journal,
 
        JBUFFER_TRACE(descriptor, "write commit block");
        set_buffer_dirty(bh);
+
        if (journal->j_flags & JFS_BARRIER) {
-               set_buffer_ordered(bh);
-               barrier_done = 1;
-       }
-       ret = sync_dirty_buffer(bh);
-       if (barrier_done)
-               clear_buffer_ordered(bh);
-       /* is it possible for another commit to fail at roughly
-        * the same time as this one?  If so, we don't want to
-        * trust the barrier flag in the super, but instead want
-        * to remember if we sent a barrier request
-        */
-       if (ret == -EOPNOTSUPP && barrier_done) {
-               char b[BDEVNAME_SIZE];
+               ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
 
-               printk(KERN_WARNING
-                       "JBD: barrier-based sync failed on %s - "
-                       "disabling barriers\n",
-                       bdevname(journal->j_dev, b));
-               spin_lock(&journal->j_state_lock);
-               journal->j_flags &= ~JFS_BARRIER;
-               spin_unlock(&journal->j_state_lock);
+               /*
+                * Is it possible for another commit to fail at roughly
+                * the same time as this one?  If so, we don't want to
+                * trust the barrier flag in the super, but instead want
+                * to remember if we sent a barrier request
+                */
+               if (ret == -EOPNOTSUPP) {
+                       char b[BDEVNAME_SIZE];
 
-               /* And try again, without the barrier */
-               set_buffer_uptodate(bh);
-               set_buffer_dirty(bh);
+                       printk(KERN_WARNING
+                               "JBD: barrier-based sync failed on %s - "
+                               "disabling barriers\n",
+                               bdevname(journal->j_dev, b));
+                       spin_lock(&journal->j_state_lock);
+                       journal->j_flags &= ~JFS_BARRIER;
+                       spin_unlock(&journal->j_state_lock);
+
+                       /* And try again, without the barrier */
+                       set_buffer_uptodate(bh);
+                       set_buffer_dirty(bh);
+                       ret = sync_dirty_buffer(bh);
+               }
+       } else {
                ret = sync_dirty_buffer(bh);
        }
+
        put_bh(bh);             /* One for getblk() */
        journal_put_journal_head(descriptor);
 
index f19ce94693d848e60ea9e34164a1c75cb56618ea..2c4b1f109da9e6bc3bcedddd423f02cb84c6bc2f 100644 (file)
@@ -1024,7 +1024,7 @@ void journal_update_superblock(journal_t *journal, int wait)
        if (wait)
                sync_dirty_buffer(bh);
        else
-               ll_rw_block(SWRITE, 1, &bh);
+               write_dirty_buffer(bh, WRITE);
 
 out:
        /* If we have just flushed the log (by marking s_start==0), then
index ad717328343acc9e1c2c66ed758c14d2aaffbe2c..d29018307e2e9cca4b97409036d18f4b9421bcac 100644 (file)
@@ -617,7 +617,7 @@ static void flush_descriptor(journal_t *journal,
        set_buffer_jwrite(bh);
        BUFFER_TRACE(bh, "write");
        set_buffer_dirty(bh);
-       ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+       write_dirty_buffer(bh, write_op);
 }
 #endif
 
index 1c23a0f4e8a35021a3c291fa3407b976fdbb34a3..5247e7ffdcb46d400e10681535a5728ef8006ad8 100644 (file)
@@ -255,7 +255,9 @@ __flush_batch(journal_t *journal, int *batch_count)
 {
        int i;
 
-       ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs);
+       for (i = 0; i < *batch_count; i++)
+               write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
+
        for (i = 0; i < *batch_count; i++) {
                struct buffer_head *bh = journal->j_chkpt_bhs[i];
                clear_buffer_jwrite(bh);
index f52e5e8049f195ec461bfb8781584722b5da2562..7c068c189d80d713d56705e63c5b5e0bf6982ab9 100644 (file)
@@ -101,7 +101,6 @@ static int journal_submit_commit_record(journal_t *journal,
        struct commit_header *tmp;
        struct buffer_head *bh;
        int ret;
-       int barrier_done = 0;
        struct timespec now = current_kernel_time();
 
        if (is_journal_aborted(journal))
@@ -136,30 +135,22 @@ static int journal_submit_commit_record(journal_t *journal,
        if (journal->j_flags & JBD2_BARRIER &&
            !JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
-               set_buffer_ordered(bh);
-               barrier_done = 1;
-       }
-       ret = submit_bh(WRITE_SYNC_PLUG, bh);
-       if (barrier_done)
-               clear_buffer_ordered(bh);
-
-       /* is it possible for another commit to fail at roughly
-        * the same time as this one?  If so, we don't want to
-        * trust the barrier flag in the super, but instead want
-        * to remember if we sent a barrier request
-        */
-       if (ret == -EOPNOTSUPP && barrier_done) {
-               printk(KERN_WARNING
-                      "JBD2: Disabling barriers on %s, "
-                      "not supported by device\n", journal->j_devname);
-               write_lock(&journal->j_state_lock);
-               journal->j_flags &= ~JBD2_BARRIER;
-               write_unlock(&journal->j_state_lock);
+               ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
+               if (ret == -EOPNOTSUPP) {
+                       printk(KERN_WARNING
+                              "JBD2: Disabling barriers on %s, "
+                              "not supported by device\n", journal->j_devname);
+                       write_lock(&journal->j_state_lock);
+                       journal->j_flags &= ~JBD2_BARRIER;
+                       write_unlock(&journal->j_state_lock);
 
-               /* And try again, without the barrier */
-               lock_buffer(bh);
-               set_buffer_uptodate(bh);
-               clear_buffer_dirty(bh);
+                       /* And try again, without the barrier */
+                       lock_buffer(bh);
+                       set_buffer_uptodate(bh);
+                       clear_buffer_dirty(bh);
+                       ret = submit_bh(WRITE_SYNC_PLUG, bh);
+               }
+       } else {
                ret = submit_bh(WRITE_SYNC_PLUG, bh);
        }
        *cbh = bh;
index ad5866aaf0f9aa88cc114fb915e1888f393ffdbb..0e8014ea6b94ad8985f1b0b842f2cea550578e67 100644 (file)
@@ -1124,7 +1124,7 @@ void jbd2_journal_update_superblock(journal_t *journal, int wait)
                        set_buffer_uptodate(bh);
                }
        } else
-               ll_rw_block(SWRITE, 1, &bh);
+               write_dirty_buffer(bh, WRITE);
 
 out:
        /* If we have just flushed the log (by marking s_start==0), then
index a360b06af2e3b488933cfd5450c9af6b3f3db36f..9ad321fd63fdf73b4d7aa99ee5c78c6edbad9da4 100644 (file)
@@ -625,7 +625,7 @@ static void flush_descriptor(journal_t *journal,
        set_buffer_jwrite(bh);
        BUFFER_TRACE(bh, "write");
        set_buffer_dirty(bh);
-       ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+       write_dirty_buffer(bh, write_op);
 }
 #endif
 
index cf4e6cdfd15b5afc091c0f2a060af90450176811..93444747237b98c03d5192870bd9740311da93f7 100644 (file)
@@ -80,6 +80,7 @@ struct mb_cache {
        struct list_head                c_cache_list;
        const char                      *c_name;
        atomic_t                        c_entry_count;
+       int                             c_max_entries;
        int                             c_bucket_bits;
        struct kmem_cache               *c_entry_cache;
        struct list_head                *c_block_hash;
@@ -243,6 +244,12 @@ mb_cache_create(const char *name, int bucket_bits)
        if (!cache->c_entry_cache)
                goto fail2;
 
+       /*
+        * Set an upper limit on the number of cache entries so that the hash
+        * chains won't grow too long.
+        */
+       cache->c_max_entries = bucket_count << 4;
+
        spin_lock(&mb_cache_spinlock);
        list_add(&cache->c_cache_list, &mb_cache_list);
        spin_unlock(&mb_cache_spinlock);
@@ -333,7 +340,6 @@ mb_cache_destroy(struct mb_cache *cache)
        kfree(cache);
 }
 
-
 /*
  * mb_cache_entry_alloc()
  *
@@ -345,17 +351,29 @@ mb_cache_destroy(struct mb_cache *cache)
 struct mb_cache_entry *
 mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
-       struct mb_cache_entry *ce;
-
-       ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
-       if (ce) {
+       struct mb_cache_entry *ce = NULL;
+
+       if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
+               spin_lock(&mb_cache_spinlock);
+               if (!list_empty(&mb_cache_lru_list)) {
+                       ce = list_entry(mb_cache_lru_list.next,
+                                       struct mb_cache_entry, e_lru_list);
+                       list_del_init(&ce->e_lru_list);
+                       __mb_cache_entry_unhash(ce);
+               }
+               spin_unlock(&mb_cache_spinlock);
+       }
+       if (!ce) {
+               ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
+               if (!ce)
+                       return NULL;
                atomic_inc(&cache->c_entry_count);
                INIT_LIST_HEAD(&ce->e_lru_list);
                INIT_LIST_HEAD(&ce->e_block_list);
                ce->e_cache = cache;
-               ce->e_used = 1 + MB_CACHE_WRITER;
                ce->e_queued = 0;
        }
+       ce->e_used = 1 + MB_CACHE_WRITER;
        return ce;
 }
 
index 17ea76bf2fbee41e9ff8a20616e0523781ed2315..24896e8335658c9c0ce2c81c117cb664b964da70 100644 (file)
@@ -595,15 +595,16 @@ int follow_up(struct path *path)
 {
        struct vfsmount *parent;
        struct dentry *mountpoint;
-       spin_lock(&vfsmount_lock);
+
+       br_read_lock(vfsmount_lock);
        parent = path->mnt->mnt_parent;
        if (parent == path->mnt) {
-               spin_unlock(&vfsmount_lock);
+               br_read_unlock(vfsmount_lock);
                return 0;
        }
        mntget(parent);
        mountpoint = dget(path->mnt->mnt_mountpoint);
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        dput(path->dentry);
        path->dentry = mountpoint;
        mntput(path->mnt);
@@ -685,6 +686,35 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
        follow_mount(&nd->path);
 }
 
+/*
+ * Allocate a dentry with name and parent, and perform a parent
+ * directory ->lookup on it. Returns the new dentry, or ERR_PTR
+ * on error. parent->d_inode->i_mutex must be held. d_lookup must
+ * have verified that no child exists while under i_mutex.
+ */
+static struct dentry *d_alloc_and_lookup(struct dentry *parent,
+                               struct qstr *name, struct nameidata *nd)
+{
+       struct inode *inode = parent->d_inode;
+       struct dentry *dentry;
+       struct dentry *old;
+
+       /* Don't create child dentry for a dead directory. */
+       if (unlikely(IS_DEADDIR(inode)))
+               return ERR_PTR(-ENOENT);
+
+       dentry = d_alloc(parent, name);
+       if (unlikely(!dentry))
+               return ERR_PTR(-ENOMEM);
+
+       old = inode->i_op->lookup(inode, dentry, nd);
+       if (unlikely(old)) {
+               dput(dentry);
+               dentry = old;
+       }
+       return dentry;
+}
+
 /*
  *  It's more convoluted than I'd like it to be, but... it's still fairly
  *  small and for now I'd prefer to have fast path as straight as possible.
@@ -706,9 +736,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
                        return err;
        }
 
+       /*
+        * Rename seqlock is not required here because in the off chance
+        * of a false negative due to a concurrent rename, we're going to
+        * do the non-racy lookup, below.
+        */
        dentry = __d_lookup(nd->path.dentry, name);
        if (!dentry)
                goto need_lookup;
+found:
        if (dentry->d_op && dentry->d_op->d_revalidate)
                goto need_revalidate;
 done:
@@ -724,56 +760,28 @@ need_lookup:
        mutex_lock(&dir->i_mutex);
        /*
         * First re-do the cached lookup just in case it was created
-        * while we waited for the directory semaphore..
+        * while we waited for the directory semaphore, or the first
+        * lookup failed due to an unrelated rename.
         *
-        * FIXME! This could use version numbering or similar to
-        * avoid unnecessary cache lookups.
-        *
-        * The "dcache_lock" is purely to protect the RCU list walker
-        * from concurrent renames at this point (we mustn't get false
-        * negatives from the RCU list walk here, unlike the optimistic
-        * fast walk).
-        *
-        * so doing d_lookup() (with seqlock), instead of lockfree __d_lookup
+        * This could use version numbering or similar to avoid unnecessary
+        * cache lookups, but then we'd have to do the first lookup in the
+        * non-racy way. However in the common case here, everything should
+        * be hot in cache, so would it be a big win?
         */
        dentry = d_lookup(parent, name);
-       if (!dentry) {
-               struct dentry *new;
-
-               /* Don't create child dentry for a dead directory. */
-               dentry = ERR_PTR(-ENOENT);
-               if (IS_DEADDIR(dir))
-                       goto out_unlock;
-
-               new = d_alloc(parent, name);
-               dentry = ERR_PTR(-ENOMEM);
-               if (new) {
-                       dentry = dir->i_op->lookup(dir, new, nd);
-                       if (dentry)
-                               dput(new);
-                       else
-                               dentry = new;
-               }
-out_unlock:
+       if (likely(!dentry)) {
+               dentry = d_alloc_and_lookup(parent, name, nd);
                mutex_unlock(&dir->i_mutex);
                if (IS_ERR(dentry))
                        goto fail;
                goto done;
        }
-
        /*
         * Uhhuh! Nasty case: the cache was re-populated while
         * we waited on the semaphore. Need to revalidate.
         */
        mutex_unlock(&dir->i_mutex);
-       if (dentry->d_op && dentry->d_op->d_revalidate) {
-               dentry = do_revalidate(dentry, nd);
-               if (!dentry)
-                       dentry = ERR_PTR(-ENOENT);
-       }
-       if (IS_ERR(dentry))
-               goto fail;
-       goto done;
+       goto found;
 
 need_revalidate:
        dentry = do_revalidate(dentry, nd);
@@ -1130,35 +1138,18 @@ static struct dentry *__lookup_hash(struct qstr *name,
                        goto out;
        }
 
-       dentry = __d_lookup(base, name);
-
-       /* lockess __d_lookup may fail due to concurrent d_move()
-        * in some unrelated directory, so try with d_lookup
+       /*
+        * Don't bother with __d_lookup: callers are for creat as
+        * well as unlink, so a lot of the time it would cost
+        * a double lookup.
         */
-       if (!dentry)
-               dentry = d_lookup(base, name);
+       dentry = d_lookup(base, name);
 
        if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
                dentry = do_revalidate(dentry, nd);
 
-       if (!dentry) {
-               struct dentry *new;
-
-               /* Don't create child dentry for a dead directory. */
-               dentry = ERR_PTR(-ENOENT);
-               if (IS_DEADDIR(inode))
-                       goto out;
-
-               new = d_alloc(base, name);
-               dentry = ERR_PTR(-ENOMEM);
-               if (!new)
-                       goto out;
-               dentry = inode->i_op->lookup(inode, new, nd);
-               if (!dentry)
-                       dentry = new;
-               else
-                       dput(new);
-       }
+       if (!dentry)
+               dentry = d_alloc_and_lookup(base, name, nd);
 out:
        return dentry;
 }
index 2e10cb19c5b02983e159bfe5f8039f3d08f3035d..de402eb6eafbad3df3957ca7b74ee92256c1e8c4 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
 #define HASH_SIZE (1UL << HASH_SHIFT)
 
-/* spinlock for vfsmount related operations, inplace of dcache_lock */
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
-
 static int event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
+static DEFINE_SPINLOCK(mnt_id_lock);
 static int mnt_id_start = 0;
 static int mnt_group_start = 1;
 
@@ -55,6 +55,16 @@ static struct rw_semaphore namespace_sem;
 struct kobject *fs_kobj;
 EXPORT_SYMBOL_GPL(fs_kobj);
 
+/*
+ * vfsmount lock may be taken for read to prevent changes to the
+ * vfsmount hash, ie. during mountpoint lookups or walking back
+ * up the tree.
+ *
+ * It should be taken for write in all cases where the vfsmount
+ * tree or hash is modified or when a vfsmount structure is modified.
+ */
+DEFINE_BRLOCK(vfsmount_lock);
+
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
@@ -65,18 +75,21 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 
 #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
 
-/* allocation is serialized by namespace_sem */
+/*
+ * allocation is serialized by namespace_sem, but we need the spinlock to
+ * serialize with freeing.
+ */
 static int mnt_alloc_id(struct vfsmount *mnt)
 {
        int res;
 
 retry:
        ida_pre_get(&mnt_id_ida, GFP_KERNEL);
-       spin_lock(&vfsmount_lock);
+       spin_lock(&mnt_id_lock);
        res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
        if (!res)
                mnt_id_start = mnt->mnt_id + 1;
-       spin_unlock(&vfsmount_lock);
+       spin_unlock(&mnt_id_lock);
        if (res == -EAGAIN)
                goto retry;
 
@@ -86,11 +99,11 @@ retry:
 static void mnt_free_id(struct vfsmount *mnt)
 {
        int id = mnt->mnt_id;
-       spin_lock(&vfsmount_lock);
+       spin_lock(&mnt_id_lock);
        ida_remove(&mnt_id_ida, id);
        if (mnt_id_start > id)
                mnt_id_start = id;
-       spin_unlock(&vfsmount_lock);
+       spin_unlock(&mnt_id_lock);
 }
 
 /*
@@ -348,7 +361,7 @@ static int mnt_make_readonly(struct vfsmount *mnt)
 {
        int ret = 0;
 
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        mnt->mnt_flags |= MNT_WRITE_HOLD;
        /*
         * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -382,15 +395,15 @@ static int mnt_make_readonly(struct vfsmount *mnt)
         */
        smp_wmb();
        mnt->mnt_flags &= ~MNT_WRITE_HOLD;
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        return ret;
 }
 
 static void __mnt_unmake_readonly(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        mnt->mnt_flags &= ~MNT_READONLY;
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 }
 
 void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
@@ -414,6 +427,7 @@ void free_vfsmnt(struct vfsmount *mnt)
 /*
  * find the first or last mount at @dentry on vfsmount @mnt depending on
  * @dir. If @dir is set return the first mount else return the last mount.
+ * vfsmount_lock must be held for read or write.
  */
 struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
                              int dir)
@@ -443,10 +457,11 @@ struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
 struct vfsmount *lookup_mnt(struct path *path)
 {
        struct vfsmount *child_mnt;
-       spin_lock(&vfsmount_lock);
+
+       br_read_lock(vfsmount_lock);
        if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
                mntget(child_mnt);
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        return child_mnt;
 }
 
@@ -455,6 +470,9 @@ static inline int check_mnt(struct vfsmount *mnt)
        return mnt->mnt_ns == current->nsproxy->mnt_ns;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void touch_mnt_namespace(struct mnt_namespace *ns)
 {
        if (ns) {
@@ -463,6 +481,9 @@ static void touch_mnt_namespace(struct mnt_namespace *ns)
        }
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void __touch_mnt_namespace(struct mnt_namespace *ns)
 {
        if (ns && ns->event != event) {
@@ -471,6 +492,9 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
        }
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
 {
        old_path->dentry = mnt->mnt_mountpoint;
@@ -482,6 +506,9 @@ static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
        old_path->dentry->d_mounted--;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
                        struct vfsmount *child_mnt)
 {
@@ -490,6 +517,9 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
        dentry->d_mounted++;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void attach_mnt(struct vfsmount *mnt, struct path *path)
 {
        mnt_set_mountpoint(path->mnt, path->dentry, mnt);
@@ -499,7 +529,7 @@ static void attach_mnt(struct vfsmount *mnt, struct path *path)
 }
 
 /*
- * the caller must hold vfsmount_lock
+ * vfsmount lock must be held for write
  */
 static void commit_tree(struct vfsmount *mnt)
 {
@@ -623,39 +653,43 @@ static inline void __mntput(struct vfsmount *mnt)
 void mntput_no_expire(struct vfsmount *mnt)
 {
 repeat:
-       if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
-               if (likely(!mnt->mnt_pinned)) {
-                       spin_unlock(&vfsmount_lock);
-                       __mntput(mnt);
-                       return;
-               }
-               atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
-               mnt->mnt_pinned = 0;
-               spin_unlock(&vfsmount_lock);
-               acct_auto_close_mnt(mnt);
-               goto repeat;
+       if (atomic_add_unless(&mnt->mnt_count, -1, 1))
+               return;
+       br_write_lock(vfsmount_lock);
+       if (!atomic_dec_and_test(&mnt->mnt_count)) {
+               br_write_unlock(vfsmount_lock);
+               return;
+       }
+       if (likely(!mnt->mnt_pinned)) {
+               br_write_unlock(vfsmount_lock);
+               __mntput(mnt);
+               return;
        }
+       atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
+       mnt->mnt_pinned = 0;
+       br_write_unlock(vfsmount_lock);
+       acct_auto_close_mnt(mnt);
+       goto repeat;
 }
-
 EXPORT_SYMBOL(mntput_no_expire);
 
 void mnt_pin(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        mnt->mnt_pinned++;
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 }
 
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *mnt)
 {
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        if (mnt->mnt_pinned) {
                atomic_inc(&mnt->mnt_count);
                mnt->mnt_pinned--;
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 }
 
 EXPORT_SYMBOL(mnt_unpin);
@@ -746,12 +780,12 @@ int mnt_had_events(struct proc_mounts *p)
        struct mnt_namespace *ns = p->ns;
        int res = 0;
 
-       spin_lock(&vfsmount_lock);
+       br_read_lock(vfsmount_lock);
        if (p->event != ns->event) {
                p->event = ns->event;
                res = 1;
        }
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
 
        return res;
 }
@@ -952,12 +986,12 @@ int may_umount_tree(struct vfsmount *mnt)
        int minimum_refs = 0;
        struct vfsmount *p;
 
-       spin_lock(&vfsmount_lock);
+       br_read_lock(vfsmount_lock);
        for (p = mnt; p; p = next_mnt(p, mnt)) {
                actual_refs += atomic_read(&p->mnt_count);
                minimum_refs += 2;
        }
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
 
        if (actual_refs > minimum_refs)
                return 0;
@@ -984,10 +1018,10 @@ int may_umount(struct vfsmount *mnt)
 {
        int ret = 1;
        down_read(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_read_lock(vfsmount_lock);
        if (propagate_mount_busy(mnt, 2))
                ret = 0;
-       spin_unlock(&vfsmount_lock);
+       br_read_unlock(vfsmount_lock);
        up_read(&namespace_sem);
        return ret;
 }
@@ -1003,13 +1037,14 @@ void release_mounts(struct list_head *head)
                if (mnt->mnt_parent != mnt) {
                        struct dentry *dentry;
                        struct vfsmount *m;
-                       spin_lock(&vfsmount_lock);
+
+                       br_write_lock(vfsmount_lock);
                        dentry = mnt->mnt_mountpoint;
                        m = mnt->mnt_parent;
                        mnt->mnt_mountpoint = mnt->mnt_root;
                        mnt->mnt_parent = mnt;
                        m->mnt_ghosts--;
-                       spin_unlock(&vfsmount_lock);
+                       br_write_unlock(vfsmount_lock);
                        dput(dentry);
                        mntput(m);
                }
@@ -1017,6 +1052,10 @@ void release_mounts(struct list_head *head)
        }
 }
 
+/*
+ * vfsmount lock must be held for write
+ * namespace_sem must be held for write
+ */
 void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
 {
        struct vfsmount *p;
@@ -1107,7 +1146,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
        }
 
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        event++;
 
        if (!(flags & MNT_DETACH))
@@ -1119,7 +1158,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
                        umount_tree(mnt, 1, &umount_list);
                retval = 0;
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        return retval;
@@ -1231,19 +1270,19 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
                        q = clone_mnt(p, p->mnt_root, flag);
                        if (!q)
                                goto Enomem;
-                       spin_lock(&vfsmount_lock);
+                       br_write_lock(vfsmount_lock);
                        list_add_tail(&q->mnt_list, &res->mnt_list);
                        attach_mnt(q, &path);
-                       spin_unlock(&vfsmount_lock);
+                       br_write_unlock(vfsmount_lock);
                }
        }
        return res;
 Enomem:
        if (res) {
                LIST_HEAD(umount_list);
-               spin_lock(&vfsmount_lock);
+               br_write_lock(vfsmount_lock);
                umount_tree(res, 0, &umount_list);
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
                release_mounts(&umount_list);
        }
        return NULL;
@@ -1262,9 +1301,9 @@ void drop_collected_mounts(struct vfsmount *mnt)
 {
        LIST_HEAD(umount_list);
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        umount_tree(mnt, 0, &umount_list);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
 }
@@ -1392,7 +1431,7 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
        if (err)
                goto out_cleanup_ids;
 
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
 
        if (IS_MNT_SHARED(dest_mnt)) {
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1411,7 +1450,8 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
                list_del_init(&child->mnt_hash);
                commit_tree(child);
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
+
        return 0;
 
  out_cleanup_ids:
@@ -1466,10 +1506,10 @@ static int do_change_type(struct path *path, int flag)
                        goto out_unlock;
        }
 
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
                change_mnt_propagation(m, type);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 
  out_unlock:
        up_write(&namespace_sem);
@@ -1513,9 +1553,10 @@ static int do_loopback(struct path *path, char *old_name,
        err = graft_tree(mnt, path);
        if (err) {
                LIST_HEAD(umount_list);
-               spin_lock(&vfsmount_lock);
+
+               br_write_lock(vfsmount_lock);
                umount_tree(mnt, 0, &umount_list);
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
                release_mounts(&umount_list);
        }
 
@@ -1568,16 +1609,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        else
                err = do_remount_sb(sb, flags, data, 0);
        if (!err) {
-               spin_lock(&vfsmount_lock);
+               br_write_lock(vfsmount_lock);
                mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
                path->mnt->mnt_flags = mnt_flags;
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
        }
        up_write(&sb->s_umount);
        if (!err) {
-               spin_lock(&vfsmount_lock);
+               br_write_lock(vfsmount_lock);
                touch_mnt_namespace(path->mnt->mnt_ns);
-               spin_unlock(&vfsmount_lock);
+               br_write_unlock(vfsmount_lock);
        }
        return err;
 }
@@ -1754,7 +1795,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                return;
 
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
 
        /* extract from the expiration list every vfsmount that matches the
         * following criteria:
@@ -1773,7 +1814,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                touch_mnt_namespace(mnt->mnt_ns);
                umount_tree(mnt, 1, &umounts);
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
 
        release_mounts(&umounts);
@@ -1830,6 +1871,8 @@ resume:
 /*
  * process a list of expirable mountpoints with the intent of discarding any
  * submounts of a specific parent mountpoint
+ *
+ * vfsmount_lock must be held for write
  */
 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
 {
@@ -2048,9 +2091,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
                kfree(new_ns);
                return ERR_PTR(-ENOMEM);
        }
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
 
        /*
         * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2244,7 +2287,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
                goto out2; /* not attached */
        /* make sure we can reach put_old from new_root */
        tmp = old.mnt;
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        if (tmp != new.mnt) {
                for (;;) {
                        if (tmp->mnt_parent == tmp)
@@ -2264,7 +2307,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        /* mount new_root on / */
        attach_mnt(new.mnt, &root_parent);
        touch_mnt_namespace(current->nsproxy->mnt_ns);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        chroot_fs_refs(&root, &new);
        error = 0;
        path_put(&root_parent);
@@ -2279,7 +2322,7 @@ out1:
 out0:
        return error;
 out3:
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        goto out2;
 }
 
@@ -2326,6 +2369,8 @@ void __init mnt_init(void)
        for (u = 0; u < HASH_SIZE; u++)
                INIT_LIST_HEAD(&mount_hashtable[u]);
 
+       br_lock_init(vfsmount_lock);
+
        err = sysfs_init();
        if (err)
                printk(KERN_WARNING "%s: sysfs_init error: %d\n",
@@ -2344,9 +2389,9 @@ void put_mnt_ns(struct mnt_namespace *ns)
        if (!atomic_dec_and_test(&ns->count))
                return;
        down_write(&namespace_sem);
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        umount_tree(ns->root, 0, &umount_list);
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        kfree(ns);
index bee60c04109a29b61d9cf4baffece2d18f7f75c3..922263393c765664f6b5598f4c144093ec9e582c 100644 (file)
@@ -175,24 +175,24 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
 {
        struct the_nilfs *nilfs = sbi->s_nilfs;
        int err;
-       int barrier_done = 0;
 
-       if (nilfs_test_opt(sbi, BARRIER)) {
-               set_buffer_ordered(nilfs->ns_sbh[0]);
-               barrier_done = 1;
-       }
  retry:
        set_buffer_dirty(nilfs->ns_sbh[0]);
-       err = sync_dirty_buffer(nilfs->ns_sbh[0]);
-       if (err == -EOPNOTSUPP && barrier_done) {
-               nilfs_warning(sbi->s_super, __func__,
-                             "barrier-based sync failed. "
-                             "disabling barriers\n");
-               nilfs_clear_opt(sbi, BARRIER);
-               barrier_done = 0;
-               clear_buffer_ordered(nilfs->ns_sbh[0]);
-               goto retry;
+
+       if (nilfs_test_opt(sbi, BARRIER)) {
+               err = __sync_dirty_buffer(nilfs->ns_sbh[0],
+                                         WRITE_SYNC | WRITE_BARRIER);
+               if (err == -EOPNOTSUPP) {
+                       nilfs_warning(sbi->s_super, __func__,
+                                     "barrier-based sync failed. "
+                                     "disabling barriers\n");
+                       nilfs_clear_opt(sbi, BARRIER);
+                       goto retry;
+               }
+       } else {
+               err = sync_dirty_buffer(nilfs->ns_sbh[0]);
        }
+
        if (unlikely(err)) {
                printk(KERN_ERR
                       "NILFS: unable to write superblock (err=%d)\n", err);
index 630715f9f73d0e9ae4370a339eb5163a8bb55ca0..d74e1983e8dc478145dc8da369c0479bb947d8c2 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -675,7 +675,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
        f->f_path.mnt = mnt;
        f->f_pos = 0;
        f->f_op = fops_get(inode->i_fop);
-       file_move(f, &inode->i_sb->s_files);
+       file_sb_list_add(f, inode->i_sb);
 
        error = security_dentry_open(f, cred);
        if (error)
@@ -721,7 +721,7 @@ cleanup_all:
                        mnt_drop_write(mnt);
                }
        }
-       file_kill(f);
+       file_sb_list_del(f);
        f->f_path.dentry = NULL;
        f->f_path.mnt = NULL;
 cleanup_file:
index 5cc564a83149a5fc311b57d87793febbdcfd4f27..8066b8dd748f6800a09694ee62c2a0b33c97d20a 100644 (file)
@@ -126,6 +126,9 @@ static int do_make_slave(struct vfsmount *mnt)
        return 0;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 void change_mnt_propagation(struct vfsmount *mnt, int type)
 {
        if (type == MS_SHARED) {
@@ -270,12 +273,12 @@ int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
                prev_src_mnt  = child;
        }
 out:
-       spin_lock(&vfsmount_lock);
+       br_write_lock(vfsmount_lock);
        while (!list_empty(&tmp_list)) {
                child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash);
                umount_tree(child, 0, &umount_list);
        }
-       spin_unlock(&vfsmount_lock);
+       br_write_unlock(vfsmount_lock);
        release_mounts(&umount_list);
        return ret;
 }
@@ -296,6 +299,8 @@ static inline int do_refcount_check(struct vfsmount *mnt, int count)
  * other mounts its parent propagates to.
  * Check if any of these mounts that **do not have submounts**
  * have more references than 'refcnt'. If so return busy.
+ *
+ * vfsmount lock must be held for read or write
  */
 int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
 {
@@ -353,6 +358,8 @@ static void __propagate_umount(struct vfsmount *mnt)
  * collect all mounts that receive propagation from the mount in @list,
  * and return these additional mounts in the same list.
  * @list: the list of mounts to be unmounted.
+ *
+ * vfsmount lock must be held for write
  */
 int propagate_umount(struct list_head *list)
 {
index ae35413dcbe1322a0ace18dbff0688138f585764..caa758377d66b8ba5530d029635d8cdfa98048ba 100644 (file)
@@ -83,6 +83,7 @@ void reiserfs_evict_inode(struct inode *inode)
        dquot_drop(inode);
        inode->i_blocks = 0;
        reiserfs_write_unlock_once(inode->i_sb, depth);
+       return;
 
 no_delete:
        end_writeback(inode);
index 1ec952b1f036fb30266cc8fea1e7b3795444fd37..812e2c05aa29eeda01bf94323234cdf260344353 100644 (file)
@@ -2311,7 +2311,7 @@ static int journal_read_transaction(struct super_block *sb,
        /* flush out the real blocks */
        for (i = 0; i < get_desc_trans_len(desc); i++) {
                set_buffer_dirty(real_blocks[i]);
-               ll_rw_block(SWRITE, 1, real_blocks + i);
+               write_dirty_buffer(real_blocks[i], WRITE);
        }
        for (i = 0; i < get_desc_trans_len(desc); i++) {
                wait_on_buffer(real_blocks[i]);
index 9674ab2c8718c3f1061f3556e6ec3e9b479cb8a9..8819e3a7ff203fb521b537672d16d5e2d4cea80d 100644 (file)
@@ -54,7 +54,22 @@ static struct super_block *alloc_super(struct file_system_type *type)
                        s = NULL;
                        goto out;
                }
+#ifdef CONFIG_SMP
+               s->s_files = alloc_percpu(struct list_head);
+               if (!s->s_files) {
+                       security_sb_free(s);
+                       kfree(s);
+                       s = NULL;
+                       goto out;
+               } else {
+                       int i;
+
+                       for_each_possible_cpu(i)
+                               INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
+               }
+#else
                INIT_LIST_HEAD(&s->s_files);
+#endif
                INIT_LIST_HEAD(&s->s_instances);
                INIT_HLIST_HEAD(&s->s_anon);
                INIT_LIST_HEAD(&s->s_inodes);
@@ -108,6 +123,9 @@ out:
  */
 static inline void destroy_super(struct super_block *s)
 {
+#ifdef CONFIG_SMP
+       free_percpu(s->s_files);
+#endif
        security_sb_free(s);
        kfree(s->s_subtype);
        kfree(s->s_options);
index 048484fb10d28f12722052955abb043ecc0ac096..46f7a807bbc1ec8313af3df1a4c08c2afb3498eb 100644 (file)
@@ -114,10 +114,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        sb->s_dirt = 1;
        
        unlock_super (sb);
@@ -207,10 +205,8 @@ do_more:
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
 
        if (overflow) {
                fragment += count;
@@ -558,10 +554,8 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        sb->s_dirt = 1;
 
        UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
@@ -680,10 +674,8 @@ cg_found:
 succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        sb->s_dirt = 1;
 
        result += cgno * uspi->s_fpg;
index 428017e018fe63268b216b9ff2fce6cd83d547b8..2eabf04af3de12e98d0fbf812879825e7247e619 100644 (file)
@@ -113,10 +113,8 @@ void ufs_free_inode (struct inode * inode)
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        
        sb->s_dirt = 1;
        unlock_super (sb);
@@ -156,10 +154,8 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,
 
        fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
        ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer(UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
 
        UFSD("EXIT\n");
 }
@@ -290,10 +286,8 @@ cg_found:
        }
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS) {
-               ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-               ubh_wait_on_buffer (UCPI_UBH(ucpi));
-       }
+       if (sb->s_flags & MS_SYNCHRONOUS)
+               ubh_sync_block(UCPI_UBH(ucpi));
        sb->s_dirt = 1;
 
        inode->i_ino = cg * uspi->s_ipg + bit;
index 34d5cb1353204ea8a2a7cf348750d66f4519613d..a58f9155fc9a7baea4169efec7529434035d8458 100644 (file)
@@ -243,10 +243,8 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
                ubh_bforget(ind_ubh);
                ind_ubh = NULL;
        }
-       if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
-               ubh_ll_rw_block(SWRITE, ind_ubh);
-               ubh_wait_on_buffer (ind_ubh);
-       }
+       if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
+               ubh_sync_block(ind_ubh);
        ubh_brelse (ind_ubh);
        
        UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -307,10 +305,8 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
                ubh_bforget(dind_bh);
                dind_bh = NULL;
        }
-       if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
-               ubh_ll_rw_block(SWRITE, dind_bh);
-               ubh_wait_on_buffer (dind_bh);
-       }
+       if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
+               ubh_sync_block(dind_bh);
        ubh_brelse (dind_bh);
        
        UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -367,10 +363,8 @@ static int ufs_trunc_tindirect(struct inode *inode)
                ubh_bforget(tind_bh);
                tind_bh = NULL;
        }
-       if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
-               ubh_ll_rw_block(SWRITE, tind_bh);
-               ubh_wait_on_buffer (tind_bh);
-       }
+       if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh))
+               ubh_sync_block(tind_bh);
        ubh_brelse (tind_bh);
        
        UFSD("EXIT: ino %lu\n", inode->i_ino);
index 85a7fc9e4a4e345161a1c8a8a26e0eebcb834aae..d2c36d53fe66e8a827d2f232309bc11d7df0afec 100644 (file)
@@ -113,21 +113,17 @@ void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
        }
 }
 
-void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh)
+void ubh_sync_block(struct ufs_buffer_head *ubh)
 {
-       if (!ubh)
-               return;
+       if (ubh) {
+               unsigned i;
 
-       ll_rw_block(rw, ubh->count, ubh->bh);
-}
+               for (i = 0; i < ubh->count; i++)
+                       write_dirty_buffer(ubh->bh[i], WRITE);
 
-void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
-{
-       unsigned i;
-       if (!ubh)
-               return;
-       for ( i = 0; i < ubh->count; i++ )
-               wait_on_buffer (ubh->bh[i]);
+               for (i = 0; i < ubh->count; i++)
+                       wait_on_buffer(ubh->bh[i]);
+       }
 }
 
 void ubh_bforget (struct ufs_buffer_head * ubh)
index 0466036912f1adb41ccf85eb2d3dbd81b669ef4e..9f8775ce381c403647e84d1df9d45ae78c80a573 100644 (file)
@@ -269,8 +269,7 @@ extern void ubh_brelse (struct ufs_buffer_head *);
 extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
 extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
 extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
-extern void ubh_ll_rw_block(int, struct ufs_buffer_head *);
-extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
+extern void ubh_sync_block(struct ufs_buffer_head *);
 extern void ubh_bforget (struct ufs_buffer_head *);
 extern int  ubh_buffer_dirty (struct ufs_buffer_head *);
 #define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
index df84e3b04555f495356dfb15e74dfcd3806b1523..d89dec864d42547b84708e3502bf81f98108e76f 100644 (file)
@@ -23,8 +23,10 @@ asmlinkage long sys_vfork(struct pt_regs *regs);
 #endif
 
 #ifndef sys_execve
-asmlinkage long sys_execve(char __user *filename, char __user * __user *argv,
-                       char __user * __user *envp, struct pt_regs *regs);
+asmlinkage long sys_execve(const char __user *filename,
+                          const char __user *const __user *argv,
+                          const char __user *const __user *envp,
+                          struct pt_regs *regs);
 #endif
 
 #ifndef sys_mmap2
index 43e649a72529afa8282f7da2029ab8e6759f0a97..ec94c12f21da5ab9d8098f7afbdf59d2cc7886b7 100644 (file)
@@ -32,7 +32,6 @@ enum bh_state_bits {
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
-       BH_Ordered,     /* ordered write */
        BH_Eopnotsupp,  /* operation not supported (barrier) */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer Error Prinks to be quiet */
@@ -125,7 +124,6 @@ BUFFER_FNS(Async_Write, async_write)
 BUFFER_FNS(Delay, delay)
 BUFFER_FNS(Boundary, boundary)
 BUFFER_FNS(Write_EIO, write_io_error)
-BUFFER_FNS(Ordered, ordered)
 BUFFER_FNS(Eopnotsupp, eopnotsupp)
 BUFFER_FNS(Unwritten, unwritten)
 
@@ -183,6 +181,8 @@ void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
+int __sync_dirty_buffer(struct buffer_head *bh, int rw);
+void write_dirty_buffer(struct buffer_head *bh, int rw);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);
index 9a96b4d83fc126a92adb9c87f7f1e5ae62bc0056..76041b6147582ef62eb0daedafbf1771a8e844c6 100644 (file)
@@ -125,9 +125,6 @@ struct inodes_stat_t {
  *                     block layer could (in theory) choose to ignore this
  *                     request if it runs into resource problems.
  * WRITE               A normal async write. Device will be plugged.
- * SWRITE              Like WRITE, but a special case for ll_rw_block() that
- *                     tells it to lock the buffer first. Normally a buffer
- *                     must be locked before doing IO.
  * WRITE_SYNC_PLUG     Synchronous write. Identical to WRITE, but passes down
  *                     the hint that someone will be waiting on this IO
  *                     shortly. The device must still be unplugged explicitly,
@@ -138,9 +135,6 @@ struct inodes_stat_t {
  *                     immediately after submission. The write equivalent
  *                     of READ_SYNC.
  * WRITE_ODIRECT_PLUG  Special case write for O_DIRECT only.
- * SWRITE_SYNC
- * SWRITE_SYNC_PLUG    Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
- *                     See SWRITE.
  * WRITE_BARRIER       Like WRITE_SYNC, but tells the block layer that all
  *                     previously submitted writes must be safely on storage
  *                     before this one is started. Also guarantees that when
@@ -155,7 +149,6 @@ struct inodes_stat_t {
 #define READ                   0
 #define WRITE                  RW_MASK
 #define READA                  RWA_MASK
-#define SWRITE                 (WRITE | READA)
 
 #define READ_SYNC              (READ | REQ_SYNC | REQ_UNPLUG)
 #define READ_META              (READ | REQ_META)
@@ -165,8 +158,6 @@ struct inodes_stat_t {
 #define WRITE_META             (WRITE | REQ_META)
 #define WRITE_BARRIER          (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
                                 REQ_HARDBARRIER)
-#define SWRITE_SYNC_PLUG       (SWRITE | REQ_SYNC | REQ_NOIDLE)
-#define SWRITE_SYNC            (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
 
 /*
  * These aren't really reads or writes, they pass down information about
@@ -929,6 +920,9 @@ struct file {
 #define f_vfsmnt       f_path.mnt
        const struct file_operations    *f_op;
        spinlock_t              f_lock;  /* f_ep_links, f_flags, no IRQ */
+#ifdef CONFIG_SMP
+       int                     f_sb_list_cpu;
+#endif
        atomic_long_t           f_count;
        unsigned int            f_flags;
        fmode_t                 f_mode;
@@ -953,9 +947,6 @@ struct file {
        unsigned long f_mnt_write_state;
 #endif
 };
-extern spinlock_t files_lock;
-#define file_list_lock() spin_lock(&files_lock);
-#define file_list_unlock() spin_unlock(&files_lock);
 
 #define get_file(x)    atomic_long_inc(&(x)->f_count)
 #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
@@ -1346,7 +1337,11 @@ struct super_block {
 
        struct list_head        s_inodes;       /* all inodes */
        struct hlist_head       s_anon;         /* anonymous dentries for (nfs) exporting */
+#ifdef CONFIG_SMP
+       struct list_head __percpu *s_files;
+#else
        struct list_head        s_files;
+#endif
        /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
        struct list_head        s_dentry_lru;   /* unused dentry lru */
        int                     s_nr_dentry_unused;     /* # of dentry on lru */
@@ -2197,8 +2192,6 @@ static inline void insert_inode_hash(struct inode *inode) {
        __insert_inode_hash(inode, inode->i_ino);
 }
 
-extern void file_move(struct file *f, struct list_head *list);
-extern void file_kill(struct file *f);
 #ifdef CONFIG_BLOCK
 extern void submit_bio(int, struct bio *);
 extern int bdev_read_only(struct block_device *);
index eca3d5202138f68bb30dd1a308aaf4e121171663..a42b5bf02f8bcfec3ea20ecef765df3f4d690250 100644 (file)
@@ -5,7 +5,7 @@
 
 struct fs_struct {
        int users;
-       rwlock_t lock;
+       spinlock_t lock;
        int umask;
        int in_exec;
        struct path root, pwd;
@@ -23,29 +23,29 @@ extern int unshare_fs_struct(void);
 
 static inline void get_fs_root(struct fs_struct *fs, struct path *root)
 {
-       read_lock(&fs->lock);
+       spin_lock(&fs->lock);
        *root = fs->root;
        path_get(root);
-       read_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
 }
 
 static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
 {
-       read_lock(&fs->lock);
+       spin_lock(&fs->lock);
        *pwd = fs->pwd;
        path_get(pwd);
-       read_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
 }
 
 static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
                                       struct path *pwd)
 {
-       read_lock(&fs->lock);
+       spin_lock(&fs->lock);
        *root = fs->root;
        path_get(root);
        *pwd = fs->pwd;
        path_get(pwd);
-       read_unlock(&fs->lock);
+       spin_unlock(&fs->lock);
 }
 
 #endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
new file mode 100644 (file)
index 0000000..b288cb7
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Specialised local-global spinlock. Can only be declared as global variables
+ * to avoid overhead and keep things simple (and we don't want to start using
+ * these inside dynamically allocated structures).
+ *
+ * "local/global locks" (lglocks) can be used to:
+ *
+ * - Provide fast exclusive access to per-CPU data, with exclusive access to
+ *   another CPU's data allowed but possibly subject to contention, and to
+ *   provide very slow exclusive access to all per-CPU data.
+ * - Or to provide very fast and scalable read serialisation, and to provide
+ *   very slow exclusive serialisation of data (not necessarily per-CPU data).
+ *
+ * Brlocks are also implemented as a short-hand notation for the latter use
+ * case.
+ *
+ * Copyright 2009, 2010, Nick Piggin, Novell Inc.
+ */
+#ifndef __LINUX_LGLOCK_H
+#define __LINUX_LGLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/lockdep.h>
+#include <linux/percpu.h>
+
+/* can make br locks by using local lock for read side, global lock for write */
+#define br_lock_init(name)     name##_lock_init()
+#define br_read_lock(name)     name##_local_lock()
+#define br_read_unlock(name)   name##_local_unlock()
+#define br_write_lock(name)    name##_global_lock_online()
+#define br_write_unlock(name)  name##_global_unlock_online()
+
+#define DECLARE_BRLOCK(name)   DECLARE_LGLOCK(name)
+#define DEFINE_BRLOCK(name)    DEFINE_LGLOCK(name)
+
+
+#define lg_lock_init(name)     name##_lock_init()
+#define lg_local_lock(name)    name##_local_lock()
+#define lg_local_unlock(name)  name##_local_unlock()
+#define lg_local_lock_cpu(name, cpu)   name##_local_lock_cpu(cpu)
+#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
+#define lg_global_lock(name)   name##_global_lock()
+#define lg_global_unlock(name) name##_global_unlock()
+#define lg_global_lock_online(name) name##_global_lock_online()
+#define lg_global_unlock_online(name) name##_global_unlock_online()
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define LOCKDEP_INIT_MAP lockdep_init_map
+
+#define DEFINE_LGLOCK_LOCKDEP(name)                                    \
+ struct lock_class_key name##_lock_key;                                        \
+ struct lockdep_map name##_lock_dep_map;                               \
+ EXPORT_SYMBOL(name##_lock_dep_map)
+
+#else
+#define LOCKDEP_INIT_MAP(a, b, c, d)
+
+#define DEFINE_LGLOCK_LOCKDEP(name)
+#endif
+
+
+#define DECLARE_LGLOCK(name)                                           \
+ extern void name##_lock_init(void);                                   \
+ extern void name##_local_lock(void);                                  \
+ extern void name##_local_unlock(void);                                        \
+ extern void name##_local_lock_cpu(int cpu);                           \
+ extern void name##_local_unlock_cpu(int cpu);                         \
+ extern void name##_global_lock(void);                                 \
+ extern void name##_global_unlock(void);                               \
+ extern void name##_global_lock_online(void);                          \
+ extern void name##_global_unlock_online(void);                                \
+
+#define DEFINE_LGLOCK(name)                                            \
+                                                                       \
+ DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
+ DEFINE_LGLOCK_LOCKDEP(name);                                          \
+                                                                       \
+ void name##_lock_init(void) {                                         \
+       int i;                                                          \
+       LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+       for_each_possible_cpu(i) {                                      \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
+       }                                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_lock_init);                                      \
+                                                                       \
+ void name##_local_lock(void) {                                                \
+       arch_spinlock_t *lock;                                          \
+       preempt_disable();                                              \
+       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
+       lock = &__get_cpu_var(name##_lock);                             \
+       arch_spin_lock(lock);                                           \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_local_lock);                                     \
+                                                                       \
+ void name##_local_unlock(void) {                                      \
+       arch_spinlock_t *lock;                                          \
+       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
+       lock = &__get_cpu_var(name##_lock);                             \
+       arch_spin_unlock(lock);                                         \
+       preempt_enable();                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_local_unlock);                                   \
+                                                                       \
+ void name##_local_lock_cpu(int cpu) {                                 \
+       arch_spinlock_t *lock;                                          \
+       preempt_disable();                                              \
+       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
+       lock = &per_cpu(name##_lock, cpu);                              \
+       arch_spin_lock(lock);                                           \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_local_lock_cpu);                                 \
+                                                                       \
+ void name##_local_unlock_cpu(int cpu) {                               \
+       arch_spinlock_t *lock;                                          \
+       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
+       lock = &per_cpu(name##_lock, cpu);                              \
+       arch_spin_unlock(lock);                                         \
+       preempt_enable();                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_local_unlock_cpu);                               \
+                                                                       \
+ void name##_global_lock_online(void) {                                        \
+       int i;                                                          \
+       preempt_disable();                                              \
+       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
+       for_each_online_cpu(i) {                                        \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_lock(lock);                                   \
+       }                                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_global_lock_online);                             \
+                                                                       \
+ void name##_global_unlock_online(void) {                              \
+       int i;                                                          \
+       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
+       for_each_online_cpu(i) {                                        \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_unlock(lock);                                 \
+       }                                                               \
+       preempt_enable();                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_global_unlock_online);                           \
+                                                                       \
+ void name##_global_lock(void) {                                       \
+       int i;                                                          \
+       preempt_disable();                                              \
+       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
+       for_each_online_cpu(i) {                                        \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_lock(lock);                                   \
+       }                                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_global_lock);                                    \
+                                                                       \
+ void name##_global_unlock(void) {                                     \
+       int i;                                                          \
+       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
+       for_each_online_cpu(i) {                                        \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_unlock(lock);                                 \
+       }                                                               \
+       preempt_enable();                                               \
+ }                                                                     \
+ EXPORT_SYMBOL(name##_global_unlock);
+#endif
index ae0a5286f558f334859a6d5a2163c18bcc974c3e..92e52a1e6af3fd8478bb451f04d34a3c63b1625f 100644 (file)
@@ -213,6 +213,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @dma_alignment: SPI controller constraint on DMA buffers alignment.
  * @mode_bits: flags understood by this controller driver
  * @flags: other constraints relevant to this driver
+ * @bus_lock_spinlock: spinlock for SPI bus locking
+ * @bus_lock_mutex: mutex for SPI bus locking
+ * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
  * @setup: updates the device mode and clocking records used by a
  *     device's SPI controller; protocol code may call this.  This
  *     must fail if an unrecognized or unsupported mode is requested.
index 1437da3ddc629b7dc285e7cb4ce11e2b5400de7a..67d64e6efe7a8c95d775b6418d2c1c8d728d667d 100644 (file)
@@ -329,6 +329,13 @@ struct tty_struct {
        struct tty_port *port;
 };
 
+/* Each of a tty's open files has private_data pointing to tty_file_private */
+struct tty_file_private {
+       struct tty_struct *tty;
+       struct file *file;
+       struct list_head list;
+};
+
 /* tty magic number */
 #define TTY_MAGIC              0x5401
 
@@ -458,6 +465,7 @@ extern void proc_clear_tty(struct task_struct *p);
 extern struct tty_struct *get_current_tty(void);
 extern void tty_default_fops(struct file_operations *fops);
 extern struct tty_struct *alloc_tty_struct(void);
+extern void tty_add_file(struct tty_struct *tty, struct file *file);
 extern void free_tty_struct(struct tty_struct *tty);
 extern void initialize_tty_struct(struct tty_struct *tty,
                struct tty_driver *driver, int idx);
@@ -470,6 +478,7 @@ extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
 extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
 
 extern struct mutex tty_mutex;
+extern spinlock_t tty_files_lock;
 
 extern void tty_write_unlock(struct tty_struct *tty);
 extern int tty_write_lock(struct tty_struct *tty, int ndelay);
index 6a664c3f7c1e426dd0b74129ce9cf81cb07fefa0..7dc97d12253c1bdc494b939e5e856b1b1fa6fc2c 100644 (file)
@@ -1707,6 +1707,7 @@ struct snd_emu10k1 {
        unsigned int card_type;                 /* EMU10K1_CARD_* */
        unsigned int ecard_ctrl;                /* ecard control bits */
        unsigned long dma_mask;                 /* PCI DMA mask */
+       unsigned int delay_pcm_irq;             /* in samples */
        int max_cache_pages;                    /* max memory size / PAGE_SIZE */
        struct snd_dma_buffer silent_page;      /* silent page */
        struct snd_dma_buffer ptb_pages;        /* page table pages */
index 98b450876f93878b437bbbc6efe6abd1c4028b29..856eac3ec52eb1a11fa6405d3e870da796c778ee 100644 (file)
@@ -752,13 +752,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
        struct fs_struct *fs = current->fs;
        if (clone_flags & CLONE_FS) {
                /* tsk->fs is already what we want */
-               write_lock(&fs->lock);
+               spin_lock(&fs->lock);
                if (fs->in_exec) {
-                       write_unlock(&fs->lock);
+                       spin_unlock(&fs->lock);
                        return -EAGAIN;
                }
                fs->users++;
-               write_unlock(&fs->lock);
+               spin_unlock(&fs->lock);
                return 0;
        }
        tsk->fs = copy_fs_struct(fs);
@@ -1676,13 +1676,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
                if (new_fs) {
                        fs = current->fs;
-                       write_lock(&fs->lock);
+                       spin_lock(&fs->lock);
                        current->fs = new_fs;
                        if (--fs->users)
                                new_fs = NULL;
                        else
                                new_fs = fs;
-                       write_unlock(&fs->lock);
+                       spin_unlock(&fs->lock);
                }
 
                if (new_mm) {
index 3632ce87674f88dfd6c4ce5c8ed09eb184ace1a3..19cccc3c302871beae5fd39ad937b0791a2e785d 100644 (file)
@@ -3846,6 +3846,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                        rpos = reader->read;
                        pos += size;
 
+                       if (rpos >= commit)
+                               break;
+
                        event = rb_reader_event(cpu_buffer);
                        size = rb_event_length(event);
                } while (len > size);
index ba14a22be4cc4aca4af73121a09910dcbebaf050..9ec59f541156625b5c4b0aea9267086c928ae07a 100644 (file)
@@ -3463,6 +3463,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
                                        size_t cnt, loff_t *fpos)
 {
        char *buf;
+       size_t written;
 
        if (tracing_disabled)
                return -EINVAL;
@@ -3484,11 +3485,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        } else
                buf[cnt] = '\0';
 
-       cnt = mark_printk("%s", buf);
+       written = mark_printk("%s", buf);
        kfree(buf);
-       *fpos += cnt;
+       *fpos += written;
 
-       return cnt;
+       /* don't tell userspace we wrote more - it might confuse them */
+       if (written > cnt)
+               written = cnt;
+
+       return written;
 }
 
 static int tracing_clock_show(struct seq_file *m, void *v)
index 09b4fa6e4d3be8b83758c48a529d01a6f18010da..4c758f146328f18ce82a318fb60a0413006aca8f 100644 (file)
@@ -598,88 +598,165 @@ out:
        return ret;
 }
 
-static void print_event_fields(struct trace_seq *s, struct list_head *head)
+enum {
+       FORMAT_HEADER           = 1,
+       FORMAT_PRINTFMT         = 2,
+};
+
+static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
+       struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
+       struct list_head *head;
 
-       list_for_each_entry_reverse(field, head, link) {
-               /*
-                * Smartly shows the array type(except dynamic array).
-                * Normal:
-                *      field:TYPE VAR
-                * If TYPE := TYPE[LEN], it is shown:
-                *      field:TYPE VAR[LEN]
-                */
-               const char *array_descriptor = strchr(field->type, '[');
+       (*pos)++;
 
-               if (!strncmp(field->type, "__data_loc", 10))
-                       array_descriptor = NULL;
+       switch ((unsigned long)v) {
+       case FORMAT_HEADER:
+               head = &ftrace_common_fields;
 
-               if (!array_descriptor) {
-                       trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
-                                       "\tsize:%u;\tsigned:%d;\n",
-                                       field->type, field->name, field->offset,
-                                       field->size, !!field->is_signed);
-               } else {
-                       trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
-                                       "\tsize:%u;\tsigned:%d;\n",
-                                       (int)(array_descriptor - field->type),
-                                       field->type, field->name,
-                                       array_descriptor, field->offset,
-                                       field->size, !!field->is_signed);
-               }
+               if (unlikely(list_empty(head)))
+                       return NULL;
+
+               field = list_entry(head->prev, struct ftrace_event_field, link);
+               return field;
+
+       case FORMAT_PRINTFMT:
+               /* all done */
+               return NULL;
+       }
+
+       head = trace_get_fields(call);
+
+       /*
+        * To separate common fields from event fields, the
+        * LSB is set on the first event field. Clear it in case.
+        */
+       v = (void *)((unsigned long)v & ~1L);
+
+       field = v;
+       /*
+        * If this is a common field, and at the end of the list, then
+        * continue with main list.
+        */
+       if (field->link.prev == &ftrace_common_fields) {
+               if (unlikely(list_empty(head)))
+                       return NULL;
+               field = list_entry(head->prev, struct ftrace_event_field, link);
+               /* Set the LSB to notify f_show to print an extra newline */
+               field = (struct ftrace_event_field *)
+                       ((unsigned long)field | 1);
+               return field;
        }
+
+       /* If we are done tell f_show to print the format */
+       if (field->link.prev == head)
+               return (void *)FORMAT_PRINTFMT;
+
+       field = list_entry(field->link.prev, struct ftrace_event_field, link);
+
+       return field;
 }
 
-static ssize_t
-event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
-                 loff_t *ppos)
+static void *f_start(struct seq_file *m, loff_t *pos)
 {
-       struct ftrace_event_call *call = filp->private_data;
-       struct list_head *head;
-       struct trace_seq *s;
-       char *buf;
-       int r;
+       loff_t l = 0;
+       void *p;
 
-       if (*ppos)
+       /* Start by showing the header */
+       if (!*pos)
+               return (void *)FORMAT_HEADER;
+
+       p = (void *)FORMAT_HEADER;
+       do {
+               p = f_next(m, p, &l);
+       } while (p && l < *pos);
+
+       return p;
+}
+
+static int f_show(struct seq_file *m, void *v)
+{
+       struct ftrace_event_call *call = m->private;
+       struct ftrace_event_field *field;
+       const char *array_descriptor;
+
+       switch ((unsigned long)v) {
+       case FORMAT_HEADER:
+               seq_printf(m, "name: %s\n", call->name);
+               seq_printf(m, "ID: %d\n", call->event.type);
+               seq_printf(m, "format:\n");
                return 0;
 
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
-       if (!s)
-               return -ENOMEM;
+       case FORMAT_PRINTFMT:
+               seq_printf(m, "\nprint fmt: %s\n",
+                          call->print_fmt);
+               return 0;
+       }
 
-       trace_seq_init(s);
+       /*
+        * To separate common fields from event fields, the
+        * LSB is set on the first event field. Clear it and
+        * print a newline if it is set.
+        */
+       if ((unsigned long)v & 1) {
+               seq_putc(m, '\n');
+               v = (void *)((unsigned long)v & ~1L);
+       }
 
-       trace_seq_printf(s, "name: %s\n", call->name);
-       trace_seq_printf(s, "ID: %d\n", call->event.type);
-       trace_seq_printf(s, "format:\n");
+       field = v;
 
-       /* print common fields */
-       print_event_fields(s, &ftrace_common_fields);
+       /*
+        * Smartly shows the array type(except dynamic array).
+        * Normal:
+        *      field:TYPE VAR
+        * If TYPE := TYPE[LEN], it is shown:
+        *      field:TYPE VAR[LEN]
+        */
+       array_descriptor = strchr(field->type, '[');
 
-       trace_seq_putc(s, '\n');
+       if (!strncmp(field->type, "__data_loc", 10))
+               array_descriptor = NULL;
 
-       /* print event specific fields */
-       head = trace_get_fields(call);
-       print_event_fields(s, head);
+       if (!array_descriptor)
+               seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                          field->type, field->name, field->offset,
+                          field->size, !!field->is_signed);
+       else
+               seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                          (int)(array_descriptor - field->type),
+                          field->type, field->name,
+                          array_descriptor, field->offset,
+                          field->size, !!field->is_signed);
 
-       r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt);
+       return 0;
+}
 
-       if (!r) {
-               /*
-                * ug!  The format output is bigger than a PAGE!!
-                */
-               buf = "FORMAT TOO BIG\n";
-               r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                             buf, strlen(buf));
-               goto out;
-       }
+static void f_stop(struct seq_file *m, void *p)
+{
+}
 
-       r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                   s->buffer, s->len);
- out:
-       kfree(s);
-       return r;
+static const struct seq_operations trace_format_seq_ops = {
+       .start          = f_start,
+       .next           = f_next,
+       .stop           = f_stop,
+       .show           = f_show,
+};
+
+static int trace_format_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_event_call *call = inode->i_private;
+       struct seq_file *m;
+       int ret;
+
+       ret = seq_open(file, &trace_format_seq_ops);
+       if (ret < 0)
+               return ret;
+
+       m = file->private_data;
+       m->private = call;
+
+       return 0;
 }
 
 static ssize_t
@@ -877,8 +954,10 @@ static const struct file_operations ftrace_enable_fops = {
 };
 
 static const struct file_operations ftrace_event_format_fops = {
-       .open = tracing_open_generic,
-       .read = event_format_read,
+       .open = trace_format_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
 };
 
 static const struct file_operations ftrace_event_id_fops = {
index 6bff2362578115f4a087195362a5257c3b838d42..6f233698518ede15cc9302e889de9f108aa0f1cb 100644 (file)
@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter,
                         * if the output fails.
                         */
                        data->ent = *curr;
-                       data->ret = *next;
+                       /*
+                        * If the next event is not a return type, then
+                        * we only care about what type it is. Otherwise we can
+                        * safely copy the entire event.
+                        */
+                       if (next->ent.type == TRACE_GRAPH_RET)
+                               data->ret = *next;
+                       else
+                               data->ret.ent.type = next->ent.type;
                }
        }
 
index 9e06b7f5ecf15b6b24ec50ea832a5720145ae8f1..1b4afd2e6ca089de0babdacc5781426ef118da5c 100644 (file)
@@ -994,13 +994,16 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 
 config LATENCYTOP
        bool "Latency measuring infrastructure"
+       depends on HAVE_LATENCYTOP_SUPPORT
+       depends on DEBUG_KERNEL
+       depends on STACKTRACE_SUPPORT
+       depends on PROC_FS
        select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
        select KALLSYMS
        select KALLSYMS_ALL
        select STACKTRACE
        select SCHEDSTATS
        select SCHED_DEBUG
-       depends on HAVE_LATENCYTOP_SUPPORT
        help
          Enable this option if you want to use the LatencyTOP tool
          to find out which userspace is blocking on what kernel operations.
index 0171060b5fd654e66434bc9f3ef3ee9064296d35..e67f054860877d676fc4359ef2f670b5d7531479 100755 (executable)
@@ -159,6 +159,7 @@ my $section_regex;  # Find the start of a section
 my $function_regex;    # Find the name of a function
                        #    (return offset and func name)
 my $mcount_regex;      # Find the call site to mcount (return offset)
+my $mcount_adjust;     # Address adjustment to mcount offset
 my $alignment;         # The .align value to use for $mcount_section
 my $section_type;      # Section header plus possible alignment command
 my $can_use_local = 0;         # If we can use local function references
@@ -213,6 +214,7 @@ $section_regex = "Disassembly of section\\s+(\\S+):";
 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
 $section_type = '@progbits';
+$mcount_adjust = 0;
 $type = ".long";
 
 if ($arch eq "x86_64") {
@@ -351,6 +353,9 @@ if ($arch eq "x86_64") {
 } elsif ($arch eq "microblaze") {
     # Microblaze calls '_mcount' instead of plain 'mcount'.
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+} elsif ($arch eq "blackfin") {
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
+    $mcount_adjust = -4;
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
@@ -511,7 +516,7 @@ while (<IN>) {
     }
     # is this a call site to mcount? If so, record it to print later
     if ($text_found && /$mcount_regex/) {
-       push(@offsets, hex $1);
+       push(@offsets, (hex $1) + $mcount_adjust);
     }
 }
 
index 96bab9469d487fb8bc66c4724f14c51bf86b295a..19358dc14605bae1422ae00226291751695ba44c 100644 (file)
@@ -62,19 +62,14 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        int deleted, connected;
        int error = 0;
 
-       /* Get the root we want to resolve too */
+       /* Get the root we want to resolve too, released below */
        if (flags & PATH_CHROOT_REL) {
                /* resolve paths relative to chroot */
-               read_lock(&current->fs->lock);
-               root = current->fs->root;
-               /* released below */
-               path_get(&root);
-               read_unlock(&current->fs->lock);
+               get_fs_root(current->fs, &root);
        } else {
                /* resolve paths relative to namespace */
                root.mnt = current->nsproxy->mnt_ns->root;
                root.dentry = root.mnt->mnt_root;
-               /* released below */
                path_get(&root);
        }
 
index 42043f96e54f69d0d2cb2ab1f063a7e638d0786f..4796ddd4e721ae454a02563d713aa235870ece02 100644 (file)
@@ -2170,8 +2170,9 @@ static inline void flush_unauthorized_files(const struct cred *cred,
 
        tty = get_current_tty();
        if (tty) {
-               file_list_lock();
+               spin_lock(&tty_files_lock);
                if (!list_empty(&tty->tty_files)) {
+                       struct tty_file_private *file_priv;
                        struct inode *inode;
 
                        /* Revalidate access to controlling tty.
@@ -2179,14 +2180,16 @@ static inline void flush_unauthorized_files(const struct cred *cred,
                           than using file_has_perm, as this particular open
                           file may belong to another process and we are only
                           interested in the inode-based check here. */
-                       file = list_first_entry(&tty->tty_files, struct file, f_u.fu_list);
+                       file_priv = list_first_entry(&tty->tty_files,
+                                               struct tty_file_private, list);
+                       file = file_priv->file;
                        inode = file->f_path.dentry->d_inode;
                        if (inode_has_perm(cred, inode,
                                           FILE__READ | FILE__WRITE, NULL)) {
                                drop_tty = 1;
                        }
                }
-               file_list_unlock();
+               spin_unlock(&tty_files_lock);
                tty_kref_put(tty);
        }
        /* Reset controlling tty. */
index a3b2a6479246deca30152bbb7f84e049634f2662..134fc6c2e08dc01eeda84a730545b0532f0588fe 100644 (file)
@@ -978,6 +978,10 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
 {
        if (substream->runtime->trigger_master != substream)
                return 0;
+       /* some drivers might use hw_ptr to recover from the pause -
+          update the hw_ptr now */
+       if (push)
+               snd_pcm_update_hw_ptr(substream);
        /* The jiffies check in snd_pcm_update_hw_ptr*() is done by
         * a delta between the current jiffies, this gives a large enough
         * delta, effectively to skip the check once.
index 4203782d7cb79bec7e7bf47157b32ee04de1efee..aff8387c45cf2e2955d71ee0ca3446808402feaa 100644 (file)
@@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64};
 static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
 static int enable_ir[SNDRV_CARDS];
 static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
+static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
 
 module_param_array(index, int, NULL, 0444);
 MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
@@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL, 0444);
 MODULE_PARM_DESC(enable_ir, "Enable IR.");
 module_param_array(subsystem, uint, NULL, 0444);
 MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
+module_param_array(delay_pcm_irq, uint, NULL, 0444);
+MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0).");
 /*
  * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value  Model:SB0400
  */
@@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
                                      &emu)) < 0)
                goto error;
        card->private_data = emu;
+       emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
        if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
                goto error;
        if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
index 55b83ef73c630e83b2d098a45cce1f570923df7f..622bace148e3c4e5e4de3efa1936b56245004c69 100644 (file)
@@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
                evoice->epcm->ccca_start_addr = start_addr + ccis;
                if (extra) {
                        start_addr += ccis;
-                       end_addr += ccis;
+                       end_addr += ccis + emu->delay_pcm_irq;
                }
                if (stereo && !extra) {
                        snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK);
@@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
        /* Assumption that PT is already 0 so no harm overwriting */
        snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]);
        snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24));
-       snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24));
+       snd_emu10k1_ptr_write(emu, PSST, voice,
+                       (start_addr + (extra ? emu->delay_pcm_irq : 0)) |
+                       (send_amount[2] << 24));
        if (emu->card_capabilities->emu_model)
                pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */
        else 
@@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_
        snd_emu10k1_ptr_write(emu, IP, voice, 0);
 }
 
+static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu,
+               struct snd_emu10k1_pcm *epcm,
+               struct snd_pcm_substream *substream,
+               struct snd_pcm_runtime *runtime)
+{
+       unsigned int ptr, period_pos;
+
+       /* try to synchronize the current position for the interrupt
+          source voice */
+       period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt;
+       period_pos %= runtime->period_size;
+       ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number);
+       ptr &= ~0x00ffffff;
+       ptr |= epcm->ccca_start_addr + period_pos;
+       snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr);
+}
+
 static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
                                        int cmd)
 {
@@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
                /* follow thru */
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
        case SNDRV_PCM_TRIGGER_RESUME:
+               if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
+                       snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime);
                mix = &emu->pcm_mixer[substream->number];
                snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix);
                snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix);
@@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream *
 #endif
        /*
        printk(KERN_DEBUG
-              "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n",
-              ptr, runtime->buffer_size, runtime->period_size);
+              "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n",
+              (long)ptr, (long)runtime->buffer_size,
+              (long)runtime->period_size);
        */
        return ptr;
 }
index ffb1ddb8dc28ea1d3fbdf574d0c4efbef9772c98..957a311514c8ed9fe34586b518aa463ec279dddd 100644 (file)
@@ -310,8 +310,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
        if (snd_BUG_ON(!hdr))
                return NULL;
 
+       idx = runtime->period_size >= runtime->buffer_size ?
+                                       (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
-       blk = search_empty(emu, runtime->dma_bytes);
+       blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
index 31b5d9eeba68655db32a631160bcf95049b4f1d0..c424952a734e0e48546518fe6b47ccbf959e9aee 100644 (file)
@@ -3049,6 +3049,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
                      CXT5066_DELL_LAPTOP),
        SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
+       SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
index 2cd1ae809e4677a84834654bcdafa3e07ddaf06a..a4dd04524e4391ce7d47f76196b6eeca5e6e9f85 100644 (file)
@@ -19030,6 +19030,7 @@ static int patch_alc888(struct hda_codec *codec)
 /*
  * ALC680 support
  */
+#define ALC680_DIGIN_NID       ALC880_DIGIN_NID
 #define ALC680_DIGOUT_NID      ALC880_DIGOUT_NID
 #define alc680_modes           alc260_modes
 
@@ -19044,23 +19045,93 @@ static hda_nid_t alc680_adc_nids[3] = {
        0x07, 0x08, 0x09
 };
 
+/*
+ * Analog capture ADC change
+ */
+static int alc680_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
+                                     struct hda_codec *codec,
+                                     unsigned int stream_tag,
+                                     unsigned int format,
+                                     struct snd_pcm_substream *substream)
+{
+       struct alc_spec *spec = codec->spec;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
+       unsigned int pre_mic, pre_line;
+
+       pre_mic  = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]);
+       pre_line = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_LINE]);
+
+       spec->cur_adc_stream_tag = stream_tag;
+       spec->cur_adc_format = format;
+
+       if (pre_mic || pre_line) {
+               if (pre_mic)
+                       snd_hda_codec_setup_stream(codec, 0x08, stream_tag, 0,
+                                                                       format);
+               else
+                       snd_hda_codec_setup_stream(codec, 0x09, stream_tag, 0,
+                                                                       format);
+       } else
+               snd_hda_codec_setup_stream(codec, 0x07, stream_tag, 0, format);
+       return 0;
+}
+
+static int alc680_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
+                                     struct hda_codec *codec,
+                                     struct snd_pcm_substream *substream)
+{
+       snd_hda_codec_cleanup_stream(codec, 0x07);
+       snd_hda_codec_cleanup_stream(codec, 0x08);
+       snd_hda_codec_cleanup_stream(codec, 0x09);
+       return 0;
+}
+
+static struct hda_pcm_stream alc680_pcm_analog_auto_capture = {
+       .substreams = 1, /* can be overridden */
+       .channels_min = 2,
+       .channels_max = 2,
+       /* NID is set in alc_build_pcms */
+       .ops = {
+               .prepare = alc680_capture_pcm_prepare,
+               .cleanup = alc680_capture_pcm_cleanup
+       },
+};
+
 static struct snd_kcontrol_new alc680_base_mixer[] = {
        /* output mixer control */
        HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT),
        HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
        HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT),
        HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT),
+       HDA_CODEC_VOLUME("Int Mic Boost", 0x12, 0, HDA_INPUT),
        HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+       HDA_CODEC_VOLUME("Line In Boost", 0x19, 0, HDA_INPUT),
        { }
 };
 
-static struct snd_kcontrol_new alc680_capture_mixer[] = {
-       HDA_CODEC_VOLUME("Capture Volume", 0x07, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x07, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x08, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x08, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 2, 0x09, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 2, 0x09, 0x0, HDA_INPUT),
+static struct hda_bind_ctls alc680_bind_cap_vol = {
+       .ops = &snd_hda_bind_vol,
+       .values = {
+               HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT),
+               HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT),
+               HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
+               0
+       },
+};
+
+static struct hda_bind_ctls alc680_bind_cap_switch = {
+       .ops = &snd_hda_bind_sw,
+       .values = {
+               HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT),
+               HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT),
+               HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
+               0
+       },
+};
+
+static struct snd_kcontrol_new alc680_master_capture_mixer[] = {
+       HDA_BIND_VOL("Capture Volume", &alc680_bind_cap_vol),
+       HDA_BIND_SW("Capture Switch", &alc680_bind_cap_switch),
        { } /* end */
 };
 
@@ -19068,25 +19139,73 @@ static struct snd_kcontrol_new alc680_capture_mixer[] = {
  * generic initialization of ADC, input mixers and output mixers
  */
 static struct hda_verb alc680_init_verbs[] = {
-       /* Unmute DAC0-1 and set vol = 0 */
-       {0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+       {0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
 
-       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40},
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40},
-       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0},
-       {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24},
-       {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20},
+       {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+       {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+       {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
 
        {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
        {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
        {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
        {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
        {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+
+       {0x16, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT   | AC_USRSP_EN},
+       {0x18, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_MIC_EVENT  | AC_USRSP_EN},
+
        { }
 };
 
+/* toggle speaker-output according to the hp-jack state */
+static void alc680_base_setup(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+
+       spec->autocfg.hp_pins[0] = 0x16;
+       spec->autocfg.speaker_pins[0] = 0x14;
+       spec->autocfg.speaker_pins[1] = 0x15;
+       spec->autocfg.input_pins[AUTO_PIN_MIC] = 0x18;
+       spec->autocfg.input_pins[AUTO_PIN_LINE] = 0x19;
+}
+
+static void alc680_rec_autoswitch(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
+       unsigned int present;
+       hda_nid_t new_adc;
+
+       present = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]);
+
+       new_adc = present ? 0x8 : 0x7;
+       __snd_hda_codec_cleanup_stream(codec, !present ? 0x8 : 0x7, 1);
+       snd_hda_codec_setup_stream(codec, new_adc,
+                                  spec->cur_adc_stream_tag, 0,
+                                  spec->cur_adc_format);
+
+}
+
+static void alc680_unsol_event(struct hda_codec *codec,
+                                          unsigned int res)
+{
+       if ((res >> 26) == ALC880_HP_EVENT)
+               alc_automute_amp(codec);
+       if ((res >> 26) == ALC880_MIC_EVENT)
+               alc680_rec_autoswitch(codec);
+}
+
+static void alc680_inithook(struct hda_codec *codec)
+{
+       alc_automute_amp(codec);
+       alc680_rec_autoswitch(codec);
+}
+
 /* create input playback/capture controls for the given pin */
 static int alc680_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
                                    const char *ctlname, int idx)
@@ -19197,13 +19316,7 @@ static void alc680_auto_init_hp_out(struct hda_codec *codec)
 #define alc680_pcm_analog_capture      alc880_pcm_analog_capture
 #define alc680_pcm_analog_alt_capture  alc880_pcm_analog_alt_capture
 #define alc680_pcm_digital_playback    alc880_pcm_digital_playback
-
-static struct hda_input_mux alc680_capture_source = {
-       .num_items = 1,
-       .items = {
-               { "Mic", 0x0 },
-       },
-};
+#define alc680_pcm_digital_capture     alc880_pcm_digital_capture
 
 /*
  * BIOS auto configuration
@@ -19218,6 +19331,7 @@ static int alc680_parse_auto_config(struct hda_codec *codec)
                                           alc680_ignore);
        if (err < 0)
                return err;
+
        if (!spec->autocfg.line_outs) {
                if (spec->autocfg.dig_outs || spec->autocfg.dig_in_pin) {
                        spec->multiout.max_channels = 2;
@@ -19239,8 +19353,6 @@ static int alc680_parse_auto_config(struct hda_codec *codec)
                add_mixer(spec, spec->kctls.list);
 
        add_verb(spec, alc680_init_verbs);
-       spec->num_mux_defs = 1;
-       spec->input_mux = &alc680_capture_source;
 
        err = alc_auto_add_mic_boost(codec);
        if (err < 0)
@@ -19279,17 +19391,17 @@ static struct snd_pci_quirk alc680_cfg_tbl[] = {
 static struct alc_config_preset alc680_presets[] = {
        [ALC680_BASE] = {
                .mixers = { alc680_base_mixer },
-               .cap_mixer =  alc680_capture_mixer,
+               .cap_mixer =  alc680_master_capture_mixer,
                .init_verbs = { alc680_init_verbs },
                .num_dacs = ARRAY_SIZE(alc680_dac_nids),
                .dac_nids = alc680_dac_nids,
-               .num_adc_nids = ARRAY_SIZE(alc680_adc_nids),
-               .adc_nids = alc680_adc_nids,
-               .hp_nid = 0x04,
                .dig_out_nid = ALC680_DIGOUT_NID,
                .num_channel_mode = ARRAY_SIZE(alc680_modes),
                .channel_mode = alc680_modes,
-               .input_mux = &alc680_capture_source,
+               .unsol_event = alc680_unsol_event,
+               .setup = alc680_base_setup,
+               .init_hook = alc680_inithook,
+
        },
 };
 
@@ -19333,9 +19445,9 @@ static int patch_alc680(struct hda_codec *codec)
                setup_preset(codec, &alc680_presets[board_config]);
 
        spec->stream_analog_playback = &alc680_pcm_analog_playback;
-       spec->stream_analog_capture = &alc680_pcm_analog_capture;
-       spec->stream_analog_alt_capture = &alc680_pcm_analog_alt_capture;
+       spec->stream_analog_capture = &alc680_pcm_analog_auto_capture;
        spec->stream_digital_playback = &alc680_pcm_digital_playback;
+       spec->stream_digital_capture = &alc680_pcm_digital_capture;
 
        if (!spec->adc_nids) {
                spec->adc_nids = alc680_adc_nids;
index f64fb7d988cb57f63876bbc92f0b94d93beb9cfe..ad5202efd7a9e270523cc517e487c6e1997027e3 100644 (file)
@@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
                    firmware.firmware.ASIC, firmware.firmware.CODEC,
                    firmware.firmware.AUXDSP, firmware.firmware.PROG);
 
+       if (!chip)
+               return 1;
+
        for (i = 0; i < FIRMWARE_VERSIONS; i++) {
                if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
-                       break;
-       }
-       if (i >= FIRMWARE_VERSIONS)
-               return 0; /* no match */
+                       return 1; /* OK */
 
-       if (!chip)
-               return 1; /* OK */
+       }
 
        snd_printdd("Writing Firmware\n");
        if (!chip->fw_entry) {
index 4e212ed62ea609be9e1b3d45a5ac3fb6efea1a38..f8154e661524c64e1098f318d447ba7bfcda3616 100644 (file)
@@ -178,13 +178,6 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_LEFT_J:
                iface |= 0x0001;
                break;
-               /* FIXME: CHECK A/B */
-       case SND_SOC_DAIFMT_DSP_A:
-               iface |= 0x0003;
-               break;
-       case SND_SOC_DAIFMT_DSP_B:
-               iface |= 0x0007;
-               break;
        default:
                return -EINVAL;
        }
index 41abb90df50d1fdf97a062e5be387613c7131ee6..dcb9700b88d2ec86c456c1185e99cf8b4619dfe4 100644 (file)
@@ -157,10 +157,6 @@ all::
 #
 # Define NO_DWARF if you do not want debug-info analysis feature at all.
 
-$(shell sh -c 'mkdir -p $(OUTPUT)scripts/{perl,python}/Perf-Trace-Util/' 2> /dev/null)
-$(shell sh -c 'mkdir -p $(OUTPUT)util/{ui/browsers,scripting-engines}/' 2> /dev/null)
-$(shell sh -c 'mkdir $(OUTPUT)bench' 2> /dev/null)
-
 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
 -include $(OUTPUT)PERF-VERSION-FILE
@@ -186,8 +182,6 @@ ifeq ($(ARCH),x86_64)
         ARCH := x86
 endif
 
-$(shell sh -c 'mkdir -p $(OUTPUT)arch/$(ARCH)/util/' 2> /dev/null)
-
 # CFLAGS and LDFLAGS are for the users to override from the command line.
 
 #
@@ -268,6 +262,7 @@ export prefix bindir sharedir sysconfdir
 CC = $(CROSS_COMPILE)gcc
 AR = $(CROSS_COMPILE)ar
 RM = rm -f
+MKDIR = mkdir
 TAR = tar
 FIND = find
 INSTALL = install
@@ -838,6 +833,7 @@ ifndef V
        QUIET_CC       = @echo '   ' CC $@;
        QUIET_AR       = @echo '   ' AR $@;
        QUIET_LINK     = @echo '   ' LINK $@;
+       QUIET_MKDIR    = @echo '   ' MKDIR $@;
        QUIET_BUILT_IN = @echo '   ' BUILTIN $@;
        QUIET_GEN      = @echo '   ' GEN $@;
        QUIET_SUBDIR0  = +@subdir=
@@ -1012,6 +1008,14 @@ $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
 $(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
 builtin-revert.o wt-status.o: wt-status.h
 
+# we compile into subdirectories. if the target directory is not the source directory, they might not exists. So
+# we depend the various files onto their directories.
+DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
+$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
+# In the second step, we make a rule to actually create these directories
+$(sort $(dir $(DIRECTORY_DEPS))):
+       $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
+
 $(LIB_FILE): $(LIB_OBJS)
        $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
 
index 55ff792459accf7a054a01b7d7b3dba55cc64281..a90273e63f4fb6939ea64e074513e1afabb1f289 100644 (file)
@@ -146,6 +146,7 @@ static int annotate_browser__run(struct annotate_browser *self,
                return -1;
 
        newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
+       newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
 
        nd = self->curr_hot;
        if (nd) {
@@ -178,7 +179,7 @@ static int annotate_browser__run(struct annotate_browser *self,
        }
 out:
        ui_browser__hide(&self->b);
-       return 0;
+       return es->u.key;
 }
 
 int hist_entry__tui_annotate(struct hist_entry *self)