bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzi...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 10 Sep 2010 03:28:19 +0000 (20:28 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 10 Sep 2010 03:28:19 +0000 (20:28 -0700)
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  libata-sff: Reenable Port Multiplier after libata-sff remodeling.
  libata: skip EH autopsy and recovery during suspend
  ahci: AHCI and RAID mode SATA patch for Intel Patsburg DeviceIDs
  ata_piix: IDE Mode SATA patch for Intel Patsburg DeviceIDs
  libata,pata_via: revert ata_wait_idle() removal from ata_sff/via_tf_load()
  ahci: fix hang on failed softreset
  pata_artop: Fix device ID parity check

140 files changed:
Documentation/DocBook/kernel-locking.tmpl
Documentation/gpio.txt
Documentation/mutex-design.txt
MAINTAINERS
arch/arm/Kconfig
arch/arm/boot/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/common/it8152.c
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/perf_event.h
arch/arm/include/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/perf_event.c
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx25/mach-cpuimx25.c
arch/arm/mach-mx3/clock-imx35.c
arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx3/mach-cpuimx35.c
arch/arm/mach-mx5/clock-mx51.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/cpufreq-pxa3xx.c
arch/arm/mach-pxa/include/mach/mfp-pxa300.h
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/clock.c
arch/arm/mach-shmobile/pm_runtime.c [new file with mode: 0644]
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/plat-mxc/Kconfig
arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
arch/arm/plat-mxc/tzic.c
arch/arm/plat-pxa/pwm.c
arch/arm/tools/mach-types
arch/powerpc/include/asm/fsldma.h
arch/x86/include/asm/iomap.h
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/trampoline.c
arch/x86/mm/iomap_32.c
arch/x86/oprofile/nmi_int.c
drivers/char/vt_ioctl.c
drivers/gpio/sx150x.c
drivers/hwmon/hp_accel.c
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/input/input.c
drivers/input/mouse/bcm5974.c
drivers/input/serio/i8042.c
drivers/input/tablet/wacom_wac.c
drivers/mmc/core/sdio.c
drivers/mmc/host/at91_mci.c
drivers/mmc/host/imxmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/tmio_mmc.c
drivers/mmc/host/tmio_mmc.h
drivers/oprofile/buffer_sync.c
drivers/oprofile/cpu_buffer.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-pl031.c
drivers/video/pxa168fb.c
fs/binfmt_misc.c
fs/direct-io.c
fs/fcntl.c
fs/fuse/dev.c
fs/fuse/file.c
fs/minix/namei.c
fs/nfsd/nfs4state.c
fs/ocfs2/alloc.c
fs/ocfs2/blockcheck.c
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/mmap.c
fs/ocfs2/namei.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/ocfs2/suballoc.h
fs/proc/page.c
fs/proc/task_mmu.c
include/asm-generic/gpio.h
include/linux/cgroup.h
include/linux/i2c/sx150x.h
include/linux/io-mapping.h
include/linux/kfifo.h
include/linux/ksm.h
include/linux/lglock.h
include/linux/mm.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/mutex.h
include/linux/semaphore.h
include/linux/swap.h
include/linux/vmstat.h
kernel/cgroup.c
kernel/debug/kdb/kdb_bp.c
kernel/gcov/fs.c
kernel/groups.c
kernel/hrtimer.c
kernel/mutex.c
kernel/perf_event.c
kernel/power/hibernate.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/sched_fair.c
kernel/sys.c
kernel/sysctl.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/watchdog.c
mm/Kconfig
mm/bounce.c
mm/compaction.c
mm/ksm.c
mm/memory.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmzone.c
mm/page_alloc.c
mm/swapfile.c
mm/vmstat.c
security/apparmor/include/resource.h
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/path.c
security/apparmor/policy.c
security/apparmor/resource.c
security/integrity/ima/ima.h
security/integrity/ima/ima_iint.c
security/integrity/ima/ima_main.c
tools/perf/util/callchain.h

index 0b1a3f97f285361a4075c8e267d42b2053747d9a..a0d479d1e1dd872bd1ae7b4d17d4582df03a384c 100644 (file)
@@ -1961,6 +1961,12 @@ machines due to caching.
    </sect1>
   </chapter>
 
+  <chapter id="apiref">
+   <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+  </chapter>
+
   <chapter id="references">
    <title>Further reading</title>
 
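(The "!Iinclude/linux/mutex.h" and "!Ekernel/mutex.c" lines added above are kernel-doc directives for the DocBook template processor: as I understand them, "!I" pulls in kernel-doc comments for internal, non-exported declarations in the named file, while "!E" pulls in the documentation for that file's exported symbols, so the new "Mutex API reference" chapter is generated from the existing in-source comments.)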
index d96a6dba57489bc6bbf3e747d82cd450084e5609..9633da01ff46afb008566ccb53aa381606654e85 100644 (file)
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
 
 If you want to initialize a structure with an invalid GPIO number, use
 some negative number (perhaps "-EINVAL"); that will never be valid.  To
-test if a number could reference a GPIO, you may use this predicate:
+test if such number from such a structure could reference a GPIO, you
+may use this predicate:
 
        int gpio_is_valid(int number);
 
 A number that's not valid will be rejected by calls which may request
 or free GPIOs (see below).  Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
-
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
+example, a number might be valid but temporarily unused on a given board.
 
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are whether that support can leave "holes" in the space
+of GPIO numbers, and whether new controllers can be added at runtime.  Such issues
+can affect things including whether adjacent GPIO numbers are both valid.
 
 Using GPIOs
 -----------
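A minimal sketch of the gpio_is_valid() predicate described in the hunk above, guarding an optional GPIO from platform data (the pdata field and the "wakeup" label are hypothetical):

        if (gpio_is_valid(pdata->wakeup_gpio)) {
                /* number may reference a GPIO; request it before use */
                if (!gpio_request(pdata->wakeup_gpio, "wakeup"))
                        gpio_direction_input(pdata->wakeup_gpio);
        }

As the documentation notes, gpio_is_valid() only rejects numbers that can never be valid; gpio_request() can still refuse a number that is valid but unused on a given board.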
@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either
 ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
 and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
 three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
 
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space.  (It should count both built-in/SoC GPIOs and
+also ones on GPIO expanders.
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
 into the kernel on that architecture.
 
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
 can enable it and build it into the kernel optionally.
 
 If neither of these options are selected, the platform does not support
index c91ccc0720fa97f42a1a616fd83e23628ae85ab1..38c10fd7f4110448facd7089b985c4776d264d85 100644 (file)
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
    'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
    A smaller structure size means less RAM footprint, and better
    CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int  mutex_lock_interruptible_nested(struct mutex *lock,
                                       unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
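A minimal sketch of the helper added to the API list above: atomic_dec_and_mutex_lock() decrements the counter and, only when it reaches zero, returns nonzero with the mutex held (the object and its fields here are hypothetical):

        if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock)) {
                /* last reference dropped; we hold obj->lock */
                release_resources(obj);         /* hypothetical teardown */
                mutex_unlock(&obj->lock);
        }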
index 087912aa09bda419450654f904c96289dcb1b43c..9800de5ec22285375123299c79759a1111290ced 100644 (file)
@@ -2787,11 +2787,6 @@ S:       Maintained
 F:     arch/x86/kernel/hpet.c
 F:     arch/x86/include/asm/hpet.h
 
-HPET:  ACPI
-M:     Bob Picco <bob.picco@hp.com>
-S:     Maintained
-F:     drivers/char/hpet.c
-
 HPFS FILESYSTEM
 M:     Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
 W:     http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3404,7 +3399,7 @@ F:        drivers/s390/kvm/
 
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
-W:     http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
+W:     http://kernel.org/pub/linux/utils/kernel/kexec/
 L:     kexec@lists.infradead.org
 S:     Maintained
 F:     include/linux/kexec.h
@@ -4810,6 +4805,7 @@ RCUTORTURE MODULE
 M:     Josh Triplett <josh@freedesktop.org>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/rcutorture.c
 
@@ -4834,6 +4830,7 @@ M:        Dipankar Sarma <dipankar@in.ibm.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:     http://www.rdrop.com/users/paulmck/rclock/
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/
 F:     include/linux/rcu*
 F:     include/linux/srcu*
@@ -4841,12 +4838,10 @@ F:      kernel/rcu*
 F:     kernel/srcu*
 X:     kernel/rcutorture.c
 
-REAL TIME CLOCK DRIVER
+REAL TIME CLOCK DRIVER (LEGACY)
 M:     Paul Gortmaker <p_gortmaker@yahoo.com>
 S:     Maintained
-F:     Documentation/rtc.txt
-F:     drivers/rtc/
-F:     include/linux/rtc.h
+F:     drivers/char/rtc.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
index 16bc8eb4901c9335b32a774082e9bc1cc77bb8b9..553b7cf17bfb0bac057eaedf0402a0bca2aa47e8 100644 (file)
@@ -1576,97 +1576,6 @@ config AUTO_ZRELADDR
          0xf8000000. This assumes the zImage being placed in the first 128MB
          from start of memory.
 
-config ZRELADDR
-       hex "Physical address of the decompressed kernel image"
-       depends on !AUTO_ZRELADDR
-       default 0x00008000 if ARCH_BCMRING ||\
-               ARCH_CNS3XXX ||\
-               ARCH_DOVE ||\
-               ARCH_EBSA110 ||\
-               ARCH_FOOTBRIDGE ||\
-               ARCH_INTEGRATOR ||\
-               ARCH_IOP13XX ||\
-               ARCH_IOP33X ||\
-               ARCH_IXP2000 ||\
-               ARCH_IXP23XX ||\
-               ARCH_IXP4XX ||\
-               ARCH_KIRKWOOD ||\
-               ARCH_KS8695 ||\
-               ARCH_LOKI ||\
-               ARCH_MMP ||\
-               ARCH_MV78XX0 ||\
-               ARCH_NOMADIK ||\
-               ARCH_NUC93X ||\
-               ARCH_NS9XXX ||\
-               ARCH_ORION5X ||\
-               ARCH_SPEAR3XX ||\
-               ARCH_SPEAR6XX ||\
-               ARCH_TEGRA ||\
-               ARCH_U8500 ||\
-               ARCH_VERSATILE ||\
-               ARCH_W90X900
-       default 0x08008000 if ARCH_MX1 ||\
-               ARCH_SHARK
-       default 0x10008000 if ARCH_MSM ||\
-               ARCH_OMAP1 ||\
-               ARCH_RPC
-       default 0x20008000 if ARCH_S5P6440 ||\
-               ARCH_S5P6442 ||\
-               ARCH_S5PC100 ||\
-               ARCH_S5PV210
-       default 0x30008000 if ARCH_S3C2410 ||\
-               ARCH_S3C2400 ||\
-               ARCH_S3C2412 ||\
-               ARCH_S3C2416 ||\
-               ARCH_S3C2440 ||\
-               ARCH_S3C2443
-       default 0x40008000 if ARCH_STMP378X ||\
-               ARCH_STMP37XX ||\
-               ARCH_SH7372 ||\
-               ARCH_SH7377 ||\
-               ARCH_S5PV310
-       default 0x50008000 if ARCH_S3C64XX ||\
-               ARCH_SH7367
-       default 0x60008000 if ARCH_VEXPRESS
-       default 0x80008000 if ARCH_MX25 ||\
-               ARCH_MX3 ||\
-               ARCH_NETX ||\
-               ARCH_OMAP2PLUS ||\
-               ARCH_PNX4008
-       default 0x90008000 if ARCH_MX5 ||\
-               ARCH_MX91231
-       default 0xa0008000 if ARCH_IOP32X ||\
-               ARCH_PXA ||\
-               MACH_MX27
-       default 0xc0008000 if ARCH_LH7A40X ||\
-               MACH_MX21
-       default 0xf0008000 if ARCH_AAEC2000 ||\
-               ARCH_L7200
-       default 0xc0028000 if ARCH_CLPS711X
-       default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
-       default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
-       default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
-       default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
-       default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
-       default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
-       default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
-       default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
-       default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
-       default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
-       default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
-       default 0xc0208000 if ARCH_SA1100 && SA1111
-       default 0xc0008000 if ARCH_SA1100 && !SA1111
-       default 0x30108000 if ARCH_S3C2410 && PM_H1940
-       default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
-       default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
-       help
-         ZRELADDR is the physical address where the decompressed kernel
-         image will be placed. ZRELADDR has to be specified when the
-         assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
-         selected.
-
 endmenu
 
 menu "CPU Power Management"
index f705213caa881af9c07e181c0d2a3a1a26a5d98a..4a590f4113e2af044ea1764aeb0681ad7f50a74c 100644 (file)
 MKIMAGE         := $(srctree)/scripts/mkuboot.sh
 
 ifneq ($(MACHINE),)
--include $(srctree)/$(MACHINE)/Makefile.boot
+include $(srctree)/$(MACHINE)/Makefile.boot
 endif
 
 # Note: the following conditions must always be true:
+#   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
 #   PARAMS_PHYS must be within 4MB of ZRELADDR
 #   INITRD_PHYS must be in RAM
+ZRELADDR    := $(zreladdr-y)
 PARAMS_PHYS := $(params_phys-y)
 INITRD_PHYS := $(initrd_phys-y)
 
-export INITRD_PHYS PARAMS_PHYS
+export ZRELADDR INITRD_PHYS PARAMS_PHYS
 
 targets := Image zImage xipImage bootpImage uImage
 
@@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE  $@
 ifeq ($(CONFIG_ZBOOT_ROM),y)
 $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
 else
-$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
+$(obj)/uImage: LOADADDR=$(ZRELADDR)
 endif
 
 ifeq ($(CONFIG_THUMB2_KERNEL),y)
index 68775e33476c2fafb4c20d88f7f676c836a8edc1..b23f6bc46cfa1dd1029bb53dc7009c3c790f1088 100644 (file)
@@ -79,6 +79,10 @@ endif
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  := -Wa,-march=all
 
+# Supply ZRELADDR to the decompressor via a linker symbol.
+ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
 endif
index 6af9907c3b5ccad2ae2d73e37f5470c5b2f6b897..6825c34646d4e02f24b0eefe7ab4e012bca05208 100644 (file)
@@ -177,7 +177,7 @@ not_angel:
                and     r4, pc, #0xf8000000
                add     r4, r4, #TEXT_OFFSET
 #else
-               ldr     r4, =CONFIG_ZRELADDR
+               ldr     r4, =zreladdr
 #endif
                subs    r0, r0, r1              @ calculate the delta offset
 
index 6c091356245593b87860d2ccb6221650fc62855b..7974baacafcea74ec055a46ee0f6cea496f24e6f 100644 (file)
@@ -263,6 +263,14 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
        return 0;
 }
 
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+       dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+               __func__, dma_addr, size);
+       return (dev->bus == &pci_bus_type) &&
+               ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
        it8152_io.start = IT8152_IO_BASE + 0x12000;
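The predicate added here encodes the IT8152 bridge's DMA limit: for devices on the PCI bus, a buffer whose end (dma_addr + size - PHYS_OFFSET) falls at or beyond 64MB from the start of RAM must be bounced, since the bridge can only address the first 64MB. The dma-mapping.h hunk below makes the extern declaration of dma_needs_bounce() unconditional (dropping the !SA1111 inline stub), so platforms like this one can supply their own definition.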
index c226fe10553e2952ec982ef3ec5fcaf8538586e0..c568da7dcae45e60e8630e2b3060599f561d6555 100644 (file)
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
  * DMA access and 1 if the buffer needs to be bounced.
  *
  */
-#ifdef CONFIG_SA1111
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#else
-static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
-                                  size_t size)
-{
-       return 0;
-}
-#endif
 
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
index 48837e6d888722dc96f594247a80026bf9b75e29..b5799a3b7117d5480eec6456008f79e01d1064ae 100644 (file)
@@ -17,7 +17,7 @@
  * counter interrupts are regular interrupts and not an NMI. This
  * means that when we receive the interrupt we can call
  * perf_event_do_pending() that handles all of the work with
- * interrupts enabled.
+ * interrupts disabled.
  */
 static inline void
 set_perf_event_pending(void)
index d02cfb683487eeafea4ef407a1a4e6f2d4ce4112..c891eb76c0e313406847e7b9fbe968bb1b8fa459 100644 (file)
 #define __NR_perf_event_open           (__NR_SYSCALL_BASE+364)
 #define __NR_recvmmsg                  (__NR_SYSCALL_BASE+365)
 #define __NR_accept4                   (__NR_SYSCALL_BASE+366)
+#define __NR_fanotify_init             (__NR_SYSCALL_BASE+367)
+#define __NR_fanotify_mark             (__NR_SYSCALL_BASE+368)
+#define __NR_prlimit64                 (__NR_SYSCALL_BASE+369)
 
 /*
  * The following SWIs are ARM private.
index afeb71fa72cb81fc0e2fb5652c653ef34e7258bb..5c26eccef9982665b1e1672416b9bc996f3b2dae 100644 (file)
                CALL(sys_perf_event_open)
 /* 365 */      CALL(sys_recvmmsg)
                CALL(sys_accept4)
+               CALL(sys_fanotify_init)
+               CALL(sys_fanotify_mark)
+               CALL(sys_prlimit64)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 417c392ddf1cb55066fa5f99e83e77514bd89901..ecbb0288e5dd95c80b420635dffee6ef600f86a8 100644 (file)
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
 {
        struct hw_perf_event fake_event = event->hw;
 
-       if (event->pmu && event->pmu != &pmu)
-               return 0;
+       if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+               return 1;
 
        return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
 }
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
index 8bf3cec98cfadba46d8ca1816c7aff5b8a871bbf..4566bd1c8660b3fe7ac0cff28473746fd3d34c82 100644 (file)
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
        clkdev_add_table(clocks, ARRAY_SIZE(clocks));
        return 0;
 }
-arch_initcall(ep93xx_clock_init);
+postcore_initcall(ep93xx_clock_init);
index 91931dcb068997d540dd2a133001eed458cbc81b..4aaadc753d3e6ff4e60b88c17e62e5b3c0cfb812 100644 (file)
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd25_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index a5f0174290b4eaa0ae36a1769ae4c9cb32631d3c..e064bb3d69197b8ddee286eda06dee980052712d 100644 (file)
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
        if (!otg_mode_host)
                mxc_register_device(&otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
+       eukrea_mbimxsd25_baseboard_init();
 #endif
 }
 
index d3af0fdf8475f7ef0d67b3afbb080df739c36431..7a62e744a8b0fcbc5eef1da9645bf663199608f8 100644 (file)
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
        if (aad->sel)
-               fref = fref * 2 / 3;
+               fref = fref * 3 / 4;
 
        return fref / aad->arm;
 }
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        struct arm_ahb_div *aad;
-       unsigned long fref = get_rate_mpll();
+       unsigned long fref = get_rate_arm();
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
 
@@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk)
        return get_rate_ahb(NULL) >> 1;
 }
 
-static unsigned long get_3_3_div(unsigned long in)
-{
-       return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
-}
-
 static unsigned long get_rate_uart(struct clk *clk)
 {
        unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div = get_3_3_div(pdr4 >> 10);
+       unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
 
        if (pdr3 & (1 << 14))
                return get_rate_arm() / div;
@@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
                break;
        }
 
-       return rate / get_3_3_div(div);
+       return rate / (div + 1);
 }
 
 static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
+       return rate / (((pdr2 >> 16) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
+       return rate / (((pdr4 >> 22) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_ipg_per(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div1, div2;
+       unsigned long div;
 
        if (pdr0 & (1 << 26)) {
-               div1 = (pdr4 >> 19) & 0x7;
-               div2 = (pdr4 >> 16) & 0x7;
-               return get_rate_arm() / ((div1 + 1) * (div2 + 1));
+               div = (pdr4 >> 16) & 0x3f;
+               return get_rate_arm() / (div + 1);
        } else {
-               div1 = (pdr0 >> 12) & 0x7;
-               return get_rate_ahb(NULL) / div1;
+               div = (pdr0 >> 12) & 0x7;
+               return get_rate_ahb(NULL) / (div + 1);
        }
 }
 
+static unsigned long get_rate_hsp(struct clk *clk)
+{
+       unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
+       unsigned long fref = get_rate_mpll();
+
+       if (fref > 400 * 1000 * 1000) {
+               switch (hsp_podf) {
+               case 0:
+                       return fref >> 2;
+               case 1:
+                       return fref >> 3;
+               case 2:
+                       return fref / 3;
+               }
+       } else {
+               switch (hsp_podf) {
+               case 0:
+               case 2:
+                       return fref / 3;
+               case 1:
+                       return fref / 6;
+               }
+       }
+
+       return 0;
+}
+
 static int clk_cgr_enable(struct clk *clk)
 {
        u32 reg;
@@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk,   0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c2_clk,   1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c3_clk,   2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_ahb, NULL);
+DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_hsp, NULL);
 DEFINE_CLOCK(kpp_clk,    0, CCM_CGR1, 20, get_rate_ipg, NULL);
 DEFINE_CLOCK(mlb_clk,    0, CCM_CGR1, 22, get_rate_ahb, NULL);
 DEFINE_CLOCK(mshc_clk,   0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-       unsigned int ll = 0;
+       unsigned int cgr2 = 3 << 26, cgr3 = 0;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       ll = (3 << 16);
+       cgr2 |= 3 << 16;
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@ int __init mx35_clocks_init()
        __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
        __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                        CCM_BASE + CCM_CGR1);
-       __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
-       __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+       /*
+        * Check if we came up in internal boot mode. If yes, we need some
+        * extra clocks turned on, otherwise the MX35 boot ROM code will
+        * hang after a watchdog reset.
+        */
+       if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
+               /* Additionally turn on UART1, SCC, and IIM clocks */
+               cgr2 |= 3 << 16 | 3 << 4;
+               cgr3 |= 3 << 2;
+       }
+
+       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+       __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
 
        mxc_timer_init(&gpt_clk,
                        MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
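The divider rework in this file replaces the old get_3_3_div() scheme (two 3-bit fields multiplied as (a+1)*(b+1)) with single 6-bit dividers read as ((reg >> shift) & 0x3f) + 1, and routes the IPU clock through the new get_rate_hsp(). As a worked example of get_rate_hsp() — assuming the usual i.MX35 configuration of a 532 MHz MPLL — fref = 532 MHz > 400 MHz with hsp_podf = 0 gives fref >> 2 = 133 MHz.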
index 1dc5004df866d4e9df7d817aca27e658573f8c6c..f8f15e3ac7a0e82a6cde9f6bd021c85a546bbae7 100644 (file)
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd35_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index 9770a6a973be561fdfb67cfdda1f8cdf36381e8e..2a4f8b781ba4c60a4d66f554d860953fcafe1cf0 100644 (file)
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
        if (!otg_mode_host)
                mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
+       eukrea_mbimxsd35_baseboard_init();
 #endif
 }
 
index 6af69def357f92d2f177d19d8fc7bce330ff5666..57c10a9926cc5056668645ff39fe3ac07fed9969 100644 (file)
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
 {
        u32 reg;
        reg = __raw_readl(clk->enable_reg);
-       reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
+       reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
        __raw_writel(reg, clk->enable_reg);
 
 }
index 268a9bc6be8a22a4ca0ac4fed742d035753581a3..50d5939a78f1bdc8f318f08360243626a14a6105 100644 (file)
@@ -398,7 +398,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 {
        int i;
        unsigned int freq;
index 27fa329d9a8b7a5677c2cf75e25900797130eb46..0a0d0fe99220d7f450e61dc04495d8dfe4492be3 100644 (file)
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret = -EINVAL;
 
index 7139e0dc26d16062304bd72beca1f339e61899e7..4e1287070d219c32235aebadfbf2d97df3d059fb 100644 (file)
 #define GPIO46_CI_DD_7         MFP_CFG_DRV(GPIO46, AF0, DS04X)
 #define GPIO47_CI_DD_8         MFP_CFG_DRV(GPIO47, AF1, DS04X)
 #define GPIO48_CI_DD_9         MFP_CFG_DRV(GPIO48, AF1, DS04X)
-#define GPIO52_CI_HSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
-#define GPIO51_CI_VSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
 #define GPIO49_CI_MCLK         MFP_CFG_DRV(GPIO49, AF0, DS04X)
 #define GPIO50_CI_PCLK         MFP_CFG_DRV(GPIO50, AF0, DS04X)
+#define GPIO51_CI_HSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
+#define GPIO52_CI_VSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
 
 /* KEYPAD */
 #define GPIO3_KP_DKIN_6                MFP_CFG_LPM(GPIO3,   AF2, FLOAT)
index 5e16b4c692222a4a45d5728cec5b4970cae495e0..ae416fe7daf2e61b09f683c9be36ebc57c105c62 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y                          := timer.o console.o clock.o
+obj-y                          := timer.o console.o clock.o pm_runtime.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)      += setup-sh7367.o clock-sh7367.o intc-sh7367.o
index 23d472f9525e6a160c97cbf8adc21c505819fb05..95935c83c30654ee94d301e94086680475a6ed67 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
 #include <linux/sh_clk.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
 
@@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
        .dma_slave_tx   = SHDMA_SLAVE_SDHI1_TX,
        .dma_slave_rx   = SHDMA_SLAVE_SDHI1_RX,
        .tmio_ocr_mask  = MMC_VDD_165_195,
+       .tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE,
 };
 
 static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@ static struct resource fsi_resources[] = {
 
 static struct platform_device fsi_device = {
        .name           = "sh_fsi2",
-       .id             = 0,
+       .id             = -1,
        .num_resources  = ARRAY_SIZE(fsi_resources),
        .resource       = fsi_resources,
        .dev    = {
@@ -650,7 +653,44 @@ static struct platform_device hdmi_device = {
        },
 };
 
+static struct gpio_led ap4evb_leds[] = {
+       {
+               .name                   = "led4",
+               .gpio                   = GPIO_PORT185,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led2",
+               .gpio                   = GPIO_PORT186,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led3",
+               .gpio                   = GPIO_PORT187,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led1",
+               .gpio                   = GPIO_PORT188,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       }
+};
+
+static struct gpio_led_platform_data ap4evb_leds_pdata = {
+       .num_leds = ARRAY_SIZE(ap4evb_leds),
+       .leds = ap4evb_leds,
+};
+
+static struct platform_device leds_device = {
+       .name = "leds-gpio",
+       .id = 0,
+       .dev = {
+               .platform_data  = &ap4evb_leds_pdata,
+       },
+};
+
 static struct platform_device *ap4evb_devices[] __initdata = {
+       &leds_device,
        &nor_flash_device,
        &smc911x_device,
        &sdhi0_device,
@@ -840,20 +880,6 @@ static void __init ap4evb_init(void)
        gpio_request(GPIO_FN_CS5A,      NULL);
        gpio_request(GPIO_FN_IRQ6_39,   NULL);
 
-       /* enable LED 1 - 4 */
-       gpio_request(GPIO_PORT185, NULL);
-       gpio_request(GPIO_PORT186, NULL);
-       gpio_request(GPIO_PORT187, NULL);
-       gpio_request(GPIO_PORT188, NULL);
-       gpio_direction_output(GPIO_PORT185, 1);
-       gpio_direction_output(GPIO_PORT186, 1);
-       gpio_direction_output(GPIO_PORT187, 1);
-       gpio_direction_output(GPIO_PORT188, 1);
-       gpio_export(GPIO_PORT185, 0);
-       gpio_export(GPIO_PORT186, 0);
-       gpio_export(GPIO_PORT187, 0);
-       gpio_export(GPIO_PORT188, 0);
-
        /* enable Debug switch (S6) */
        gpio_request(GPIO_PORT32, NULL);
        gpio_request(GPIO_PORT33, NULL);
index fb4e9b1d788e464922ba2345d60fb43b8e1173d2..759468992ad287ff3f40b2f2e92e19d99734c94a 100644 (file)
@@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = {
 
 struct clk pllc2_clk = {
        .ops            = &pllc2_clk_ops,
-       .flags          = CLK_ENABLE_ON_INIT,
        .parent         = &extal1_div2_clk,
        .freq_table     = pllc2_freq_table,
        .parent_table   = pllc2_parent,
@@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
 
 enum { MSTP001,
        MSTP131, MSTP130,
-       MSTP129, MSTP128,
+       MSTP129, MSTP128, MSTP127, MSTP126,
        MSTP118, MSTP117, MSTP116,
        MSTP106, MSTP101, MSTP100,
        MSTP223,
@@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
        [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
        [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
+       [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
+       [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
        [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
        [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
        [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
        [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
-       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
+       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
        [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
        [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
        [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
+       CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
        CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
index b7c705a213a2a1400e180df83d9e5a6f67649db4..6b7c7c42bc8fc529678fe7e77d01878299d049c4 100644 (file)
@@ -1,8 +1,10 @@
 /*
- * SH-Mobile Timer
+ * SH-Mobile Clock Framework
  *
  * Copyright (C) 2010  Magnus Damm
  *
+ * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644 (file)
index 0000000..94912d3
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * arch/arm/mach-shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile ARM
+ *
+ *  Copyright (C) 2009-2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+#include <linux/bitmap.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_CLK_ENABLED 2
+
+struct pm_runtime_data {
+       unsigned long flags;
+       struct clk *clk;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+       struct pm_runtime_data *prd = res;
+
+       dev_dbg(dev, "__devres_release()\n");
+
+       if (test_bit(BIT_CLK_ENABLED, &prd->flags))
+               clk_disable(prd->clk);
+
+       if (test_bit(BIT_ACTIVE, &prd->flags))
+               clk_put(prd->clk);
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+       return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+                                    struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
+               prd->clk = clk_get(dev, NULL);
+               if (!IS_ERR(prd->clk)) {
+                       set_bit(BIT_ACTIVE, &prd->flags);
+                       dev_info(dev, "clocks managed by runtime pm\n");
+               }
+       }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+                                   struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+               dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_suspend()\n");
+
+       platform_pm_runtime_bug(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_disable(prd->clk);
+               clear_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_resume()\n");
+
+       platform_pm_runtime_init(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_enable(prd->clk);
+               set_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+       /* suspend synchronously to disable clocks immediately */
+       return pm_runtime_suspend(dev);
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct pm_runtime_data *prd;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       if (action == BUS_NOTIFY_BIND_DRIVER) {
+               prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+               if (prd)
+                       devres_add(dev, prd);
+               else
+                       dev_err(dev, "unable to alloc memory for runtime pm\n");
+       }
+
+       return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct clk *clk;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       switch (action) {
+       case BUS_NOTIFY_BIND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_enable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced on\n");
+               }
+               break;
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_disable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced off\n");
+               }
+               break;
+       }
+
+       return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+static struct notifier_block platform_bus_notifier = {
+       .notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+       bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+       return 0;
+}
+core_initcall(sh_pm_runtime_init);
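With this bus notifier in place, a platform driver gets its clock managed transparently through the standard runtime-PM calls; a minimal sketch (the driver context is hypothetical):

        /* in a driver's probe() */
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);   /* resume -> clk_enable() above */
        /* ... use the hardware ... */
        pm_runtime_put_sync(&pdev->dev);   /* idle -> suspend -> clk_disable() */

When CONFIG_PM_RUNTIME is off, the #else branch instead forces the clock on at driver bind and off at unbind, so devices keep working without runtime PM.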
index 33c3f570aaa06c2a56f6a6d70eb7f558a883b0b1..a0a2928ae4dd7670a1342863040791833b57dbab 100644 (file)
@@ -398,7 +398,7 @@ config CPU_V6
 # ARMv6k
 config CPU_32v6K
        bool "Support ARM V6K processor extensions" if !SMP
-       depends on CPU_V6
+       depends on CPU_V6 || CPU_V7
        default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
        help
          Say Y here if your ARMv6 processor supports the 'K' extension.
index c704eed63c5ddba4c5f849f7ab7b6420008cef21..4bc43e535d3baadc657df9af504078ff1ae00570 100644 (file)
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
                        }
                } while (size -= PAGE_SIZE);
 
+               dsb();
+
                return (void *)c->vm_start;
        }
        return NULL;
index 0527e65318f4a647b5b00192ce08ba47368ce5f8..6785db4179b84ccd925f9112cd48e9be71668e7c 100644 (file)
@@ -43,6 +43,7 @@ config ARCH_MXC91231
 config ARCH_MX5
        bool "MX5-based"
        select CPU_V7
+       select ARM_L1_CACHE_SHIFT_6
        help
          This enables support for systems based on the Freescale i.MX51 family
 
index 634e3f4c454df222728aa678bdcd657967286dbc..656acb45d434b7333e0864798fb6b69538387701 100644 (file)
@@ -37,9 +37,9 @@
  * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
  */
 
-extern void eukrea_mbimx25_baseboard_init(void);
+extern void eukrea_mbimxsd25_baseboard_init(void);
 extern void eukrea_mbimx27_baseboard_init(void);
-extern void eukrea_mbimx35_baseboard_init(void);
+extern void eukrea_mbimxsd35_baseboard_init(void);
 extern void eukrea_mbimx51_baseboard_init(void);
 
 #endif
index b3da9aad4295704ef9ea8a4a43c95127d74e6d9e..3703ab28257fbbb55d3db89bee56877eebb38345 100644 (file)
@@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle)
                return -EAGAIN;
 
        for (i = 0; i < 4; i++) {
-               v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
-               __raw_writel(v, TZIC_WAKEUP0(i));
+               v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
+                       wakeup_intr[i];
+               __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
        }
 
        return 0;
index 0732c6c8d511979e354cced2cd6987889702a1e7..ef32686feef9431ab00f42e4a2a0e2d7656af783 100644 (file)
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
 
 static int __devinit pwm_probe(struct platform_device *pdev)
 {
-       struct platform_device_id *id = platform_get_device_id(pdev);
+       const struct platform_device_id *id = platform_get_device_id(pdev);
        struct pwm_device *pwm, *secondary = NULL;
        struct resource *r;
        int ret = 0;
index 48cbdcb6bbd4288929f31bef94da3691b160a489..55590a4d87c932984404d1df13ca4c296c9d7117 100644 (file)
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Mon Jul 12 21:10:14 2010
+# Last update: Thu Sep 9 22:43:01 2010
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -2622,7 +2622,7 @@ kraken                    MACH_KRAKEN             KRAKEN                  2634
 gw2388                 MACH_GW2388             GW2388                  2635
 jadecpu                        MACH_JADECPU            JADECPU                 2636
 carlisle               MACH_CARLISLE           CARLISLE                2637
-lux_sf9                        MACH_LUX_SFT9           LUX_SFT9                2638
+lux_sf9                        MACH_LUX_SF9            LUX_SF9                 2638
 nemid_tb               MACH_NEMID_TB           NEMID_TB                2639
 terrier                        MACH_TERRIER            TERRIER                 2640
 turbot                 MACH_TURBOT             TURBOT                  2641
@@ -2950,3 +2950,97 @@ davinci_dm365_dvr        MACH_DAVINCI_DM365_DVR  DAVINCI_DM365_DVR       2963
 netviz                 MACH_NETVIZ             NETVIZ                  2964
 flexibity              MACH_FLEXIBITY          FLEXIBITY               2965
 wlan_computer          MACH_WLAN_COMPUTER      WLAN_COMPUTER           2966
+lpc24xx                        MACH_LPC24XX            LPC24XX                 2967
+spica                  MACH_SPICA              SPICA                   2968
+gpsdisplay             MACH_GPSDISPLAY         GPSDISPLAY              2969
+bipnet                 MACH_BIPNET             BIPNET                  2970
+overo_ctu_inertial     MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL      2971
+davinci_dm355_mmm      MACH_DAVINCI_DM355_MMM  DAVINCI_DM355_MMM       2972
+pc9260_v2              MACH_PC9260_V2          PC9260_V2               2973
+ptx7545                        MACH_PTX7545            PTX7545                 2974
+tm_efdc                        MACH_TM_EFDC            TM_EFDC                 2975
+omap3_waldo1           MACH_OMAP3_WALDO1       OMAP3_WALDO1            2977
+flyer                  MACH_FLYER              FLYER                   2978
+tornado3240            MACH_TORNADO3240        TORNADO3240             2979
+soli_01                        MACH_SOLI_01            SOLI_01                 2980
+omapl138_europalc      MACH_OMAPL138_EUROPALC  OMAPL138_EUROPALC       2981
+helios_v1              MACH_HELIOS_V1          HELIOS_V1               2982
+netspace_lite_v2       MACH_NETSPACE_LITE_V2   NETSPACE_LITE_V2        2983
+ssc                    MACH_SSC                SSC                     2984
+premierwave_en         MACH_PREMIERWAVE_EN     PREMIERWAVE_EN          2985
+wasabi                 MACH_WASABI             WASABI                  2986
+vivow                  MACH_VIVOW              VIVOW                   2987
+mx50_rdp               MACH_MX50_RDP           MX50_RDP                2988
+universal              MACH_UNIVERSAL          UNIVERSAL               2989
+real6410               MACH_REAL6410           REAL6410                2990
+spx_sakura             MACH_SPX_SAKURA         SPX_SAKURA              2991
+ij3k_2440              MACH_IJ3K_2440          IJ3K_2440               2992
+omap3_bc10             MACH_OMAP3_BC10         OMAP3_BC10              2993
+thebe                  MACH_THEBE              THEBE                   2994
+rv082                  MACH_RV082              RV082                   2995
+armlguest              MACH_ARMLGUEST          ARMLGUEST               2996
+tjinc1000              MACH_TJINC1000          TJINC1000               2997
+dockstar               MACH_DOCKSTAR           DOCKSTAR                2998
+ax8008                 MACH_AX8008             AX8008                  2999
+gnet_sgce              MACH_GNET_SGCE          GNET_SGCE               3000
+pxwnas_500_1000                MACH_PXWNAS_500_1000    PXWNAS_500_1000         3001
+ea20                   MACH_EA20               EA20                    3002
+awm2                   MACH_AWM2               AWM2                    3003
+ti8148evm              MACH_TI8148EVM          TI8148EVM               3004
+tegra_seaboard         MACH_TEGRA_SEABOARD     TEGRA_SEABOARD          3005
+linkstation_chlv2      MACH_LINKSTATION_CHLV2  LINKSTATION_CHLV2       3006
+tera_pro2_rack         MACH_TERA_PRO2_RACK     TERA_PRO2_RACK          3007
+rubys                  MACH_RUBYS              RUBYS                   3008
+aquarius               MACH_AQUARIUS           AQUARIUS                3009
+mx53_ard               MACH_MX53_ARD           MX53_ARD                3010
+mx53_smd               MACH_MX53_SMD           MX53_SMD                3011
+lswxl                  MACH_LSWXL              LSWXL                   3012
+dove_avng_v3           MACH_DOVE_AVNG_V3       DOVE_AVNG_V3            3013
+sdi_ess_9263           MACH_SDI_ESS_9263       SDI_ESS_9263            3014
+jocpu550               MACH_JOCPU550           JOCPU550                3015
+msm8x60_rumi3          MACH_MSM8X60_RUMI3      MSM8X60_RUMI3           3016
+msm8x60_ffa            MACH_MSM8X60_FFA        MSM8X60_FFA             3017
+yanomami               MACH_YANOMAMI           YANOMAMI                3018
+gta04                  MACH_GTA04              GTA04                   3019
+cm_a510                        MACH_CM_A510            CM_A510                 3020
+omap3_rfs200           MACH_OMAP3_RFS200       OMAP3_RFS200            3021
+kx33xx                 MACH_KX33XX             KX33XX                  3022
+ptx7510                        MACH_PTX7510            PTX7510                 3023
+top9000                        MACH_TOP9000            TOP9000                 3024
+teenote                        MACH_TEENOTE            TEENOTE                 3025
+ts3                    MACH_TS3                TS3                     3026
+a0                     MACH_A0                 A0                      3027
+fsm9xxx_surf           MACH_FSM9XXX_SURF       FSM9XXX_SURF            3028
+fsm9xxx_ffa            MACH_FSM9XXX_FFA        FSM9XXX_FFA             3029
+frrhwcdma60w           MACH_FRRHWCDMA60W       FRRHWCDMA60W            3030
+remus                  MACH_REMUS              REMUS                   3031
+at91cap7xdk            MACH_AT91CAP7XDK        AT91CAP7XDK             3032
+at91cap7stk            MACH_AT91CAP7STK        AT91CAP7STK             3033
+kt_sbc_sam9_1          MACH_KT_SBC_SAM9_1      KT_SBC_SAM9_1           3034
+oratisrouter           MACH_ORATISROUTER       ORATISROUTER            3035
+armada_xp_db           MACH_ARMADA_XP_DB       ARMADA_XP_DB            3036
+spdm                   MACH_SPDM               SPDM                    3037
+gtib                   MACH_GTIB               GTIB                    3038
+dgm3240                        MACH_DGM3240            DGM3240                 3039
+atlas_i_lpe            MACH_ATLAS_I_LPE        ATLAS_I_LPE             3040
+htcmega                        MACH_HTCMEGA            HTCMEGA                 3041
+tricorder              MACH_TRICORDER          TRICORDER               3042
+tx28                   MACH_TX28               TX28                    3043
+bstbrd                 MACH_BSTBRD             BSTBRD                  3044
+pwb3090                        MACH_PWB3090            PWB3090                 3045
+idea6410               MACH_IDEA6410           IDEA6410                3046
+qbc9263                        MACH_QBC9263            QBC9263                 3047
+borabora               MACH_BORABORA           BORABORA                3048
+valdez                 MACH_VALDEZ             VALDEZ                  3049
+ls9g20                 MACH_LS9G20             LS9G20                  3050
+mios_v1                        MACH_MIOS_V1            MIOS_V1                 3051
+s5pc110_crespo         MACH_S5PC110_CRESPO     S5PC110_CRESPO          3052
+controltek9g20         MACH_CONTROLTEK9G20     CONTROLTEK9G20          3053
+tin307                 MACH_TIN307             TIN307                  3054
+tin510                 MACH_TIN510             TIN510                  3055
+bluecheese             MACH_BLUECHEESE         BLUECHEESE              3057
+tem3x30                        MACH_TEM3X30            TEM3X30                 3058
+harvest_desoto         MACH_HARVEST_DESOTO     HARVEST_DESOTO          3059
+msm8x60_qrdc           MACH_MSM8X60_QRDC       MSM8X60_QRDC            3060
+spear900               MACH_SPEAR900           SPEAR900                3061
+pcontrol_g20           MACH_PCONTROL_G20       PCONTROL_G20            3062
index a67aeed17d405fbc37e3a44a860c67a3a1dd9fcf..debc5ed96d6e087a2e241e47421f537a13bf1feb 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
 #define __ARCH_POWERPC_ASM_FSLDMA_H__
 
+#include <linux/slab.h>
 #include <linux/dmaengine.h>
 
 /*
index f35eb45d6576258e7dba242934d76376380b531c..c4191b3b7056c6ad16563375f529d1480668a26e 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
index 224392d8fe8c095390439a10ea45af2e21bc0930..5e975298fa819ff3ca648e647f531700edb1f408 100644 (file)
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                err = -ENOMEM;
                goto out;
        }
-       if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
 #else
-       cpumask_copy(b->cpus, c->llc_shared_map);
+       cpumask_set_cpu(cpu, b->cpus);
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
index c2a8b26d4feacf4ac6b6b022c0a9c590fbbcef85..d9368eeda3090eb9f53482704e000e600b6237e3 100644 (file)
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+                               unsigned int cpu)
 {
        int err;
-       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
        if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
-               err = thermal_throttle_add_dev(sys_dev);
+               err = thermal_throttle_add_dev(sys_dev, cpu);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
 #endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
-               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
                WARN_ON(err);
        }
 #ifdef CONFIG_HOTPLUG_CPU
index f2da20fda02ddf6fcd449a88ba399fe4ed44af2a..3efdf2870a3572263add749326aa5925df7c461f 100644 (file)
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                /*
                 * event overflow
                 */
-               handled         = 1;
+               handled++;
                data.period     = event->hw.last_period;
 
                if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+       unsigned int    marked;
+       int             handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
 {
        struct die_args *args = __args;
-       struct pt_regs *regs;
+       unsigned int this_nmi;
+       int handled;
 
        if (!atomic_read(&active_events))
                return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;
-
+       case DIE_NMIUNKNOWN:
+               this_nmi = percpu_read(irq_stat.__nmi_count);
+               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+                       /* let the kernel handle the unknown nmi */
+                       return NOTIFY_DONE;
+               /*
+                * This one is a PMU back-to-back nmi. Two events
+                * trigger 'simultaneously' raising two back-to-back
+                * NMIs. If the first NMI handles both, the latter
+                * will be empty and daze the CPU. So, we drop it to
+                * avoid false-positive 'unknown nmi' messages.
+                */
+               return NOTIFY_STOP;
        default:
                return NOTIFY_DONE;
        }
 
-       regs = args->regs;
-
        apic_write(APIC_LVTPC, APIC_DM_NMI);
-       /*
-        * Can't rely on the handled return value to say it was our NMI, two
-        * events could trigger 'simultaneously' raising two back-to-back NMIs.
-        *
-        * If the first NMI handles both, the latter will be empty and daze
-        * the CPU.
-        */
-       x86_pmu.handle_irq(regs);
+
+       handled = x86_pmu.handle_irq(args->regs);
+       if (!handled)
+               return NOTIFY_DONE;
+
+       this_nmi = percpu_read(irq_stat.__nmi_count);
+       if ((handled > 1) ||
+               /* the next nmi could be a back-to-back nmi */
+           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+            (__get_cpu_var(pmu_nmi).handled > 1))) {
+               /*
+                * We could have two subsequent back-to-back nmis: The
+                * first handles more than one counter, the 2nd
+                * handles only one counter and the 3rd handles no
+                * counter.
+                *
+                * This is the 2nd nmi because the previous was
+                * handling more than one counter. We will mark the
+                * next (3rd) and then drop it if unhandled.
+                */
+               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
+               __get_cpu_var(pmu_nmi).handled  = handled;
+       }
 
        return NOTIFY_STOP;
 }
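
To make the per-CPU bookkeeping above easier to follow, here is a small userspace model of it (a sketch: plain globals stand in for the per-CPU pmu_nmi state and irq_stat.__nmi_count):

#include <stdio.h>

struct pmu_nmi_state { unsigned int marked; int handled; };
static struct pmu_nmi_state pmu_nmi;	/* per-CPU in the real code */

/* After handling NMI number 'this_nmi': if it serviced more than one
 * counter, the next NMI may arrive back-to-back and find nothing left
 * to do, so mark it for dropping. */
static void record_handled(unsigned int this_nmi, int handled)
{
	if (handled > 1 ||
	    (pmu_nmi.marked == this_nmi && pmu_nmi.handled > 1)) {
		pmu_nmi.marked  = this_nmi + 1;
		pmu_nmi.handled = handled;
	}
}

/* On DIE_NMIUNKNOWN: swallow the NMI only if it is the one we marked. */
static int should_drop_unknown(unsigned int this_nmi)
{
	return this_nmi == pmu_nmi.marked;
}

int main(void)
{
	record_handled(41, 2);				/* NMI 41 hit two counters */
	printf("%d\n", should_drop_unknown(42));	/* 1: expected empty NMI   */
	printf("%d\n", should_drop_unknown(43));	/* 0: genuinely unknown    */
	return 0;
}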
index d8d86d01400866c6320001fb715301276747cd52..ee05c90012d269e66de9ffb6a5952d1434317c1e 100644 (file)
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
-       u64 ack, status;
+       u64 status;
+       int handled = 0;
 
        perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
        loops = 0;
 again:
+       intel_pmu_ack_status(status);
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
        }
 
        inc_irq_stat(apic_perf_irqs);
-       ack = status;
 
        intel_pmu_lbr_read();
 
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
-       if (__test_and_clear_bit(62, (unsigned long *)&status))
+       if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+               handled++;
                x86_pmu.drain_pebs(regs);
+       }
 
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
+               handled++;
+
                if (!test_bit(bit, cpuc->active_mask))
                        continue;
 
@@ -761,8 +766,6 @@ again:
                        x86_pmu_stop(event);
        }
 
-       intel_pmu_ack_status(ack);
-
        /*
         * Repeat if there is more work to be done:
         */
@@ -772,7 +775,7 @@ again:
 
 done:
        intel_pmu_enable_all(0);
-       return 1;
+       return handled;
 }
 
 static struct event_constraint *
index 7e578e9cc58bd5062d30776d431dabdcf724ac67..b560db3305be16ff954fc416137d17b17189b5e7 100644 (file)
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                inc_irq_stat(apic_perf_irqs);
        }
 
-       return handled > 0;
+       return handled;
 }
 
 /*
index a874495b3673baeb27467d144995d885f2f94ebc..e2a5952573905b2eeac3d18060be54532dbfdfde 100644 (file)
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
        /* Copy kernel address range */
        clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                       min_t(unsigned long, KERNEL_PGD_PTRS,
-                             KERNEL_PGD_BOUNDARY));
+                       KERNEL_PGD_PTRS);
 
        /* Initialize low mappings */
        clone_pgd_range(trampoline_pg_dir,
index 84e236ce76ba9a8afd624cfc4c506ebaa654b926..72fc70cf6184c756b1157f272b0d5e2b7bcc0609 100644 (file)
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 /*
  * Map 'pfn' using fixed map 'type' and protections 'prot'
  */
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
        /*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;
 
-       return kmap_atomic_prot_pfn(pfn, type, prot);
+       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
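
With the __iomem annotations above, sparse can check that the cookie is only used through I/O accessors and that the same pointer type flows back into iounmap_atomic(). A hypothetical call site (pfn, off and val are stand-ins, not from this commit):

void __iomem *p;

p = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_WC);
iowrite32(val, p + off);	/* accessor, never a plain pointer store */
iounmap_atomic(p, KM_USER0);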
index f6b48f6c595176a59c8e2fe5fa145bc11acca118..cfe4faabb0f6792aebfb8330dd55f02406b148dc 100644 (file)
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
        int error;
 
        error = sysdev_class_register(&oprofile_sysclass);
-       if (!error)
-               error = sysdev_register(&device_oprofile);
+       if (error)
+               return error;
+
+       error = sysdev_register(&device_oprofile);
+       if (error)
+               sysdev_class_unregister(&oprofile_sysclass);
+
        return error;
 }
 
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int  init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
        char *cpu_type = NULL;
        int ret = 0;
 
+       using_nmi = 0;
+
        if (!cpu_has_apic)
                return -ENODEV;
 
@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
        mux_init(ops);
 
-       init_sysfs();
+       ret = init_sysfs();
+       if (ret)
+               return ret;
+
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
index 2bbeaaea46e9b7765ce983374fb125b7dd5422c9..38df8c19e74cc56903d5985cdbee7d52df3dc0d9 100644 (file)
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        case KIOCSOUND:
                if (!perm)
                        goto eperm;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
+               /*
+                * The use of PIT_TICK_RATE is historic, it used to be
+                * the platform-dependent CLOCK_TICK_RATE between 2.6.12
+                * and 2.6.36, which was a minor but unfortunate ABI
+                * change.
+                */
                if (arg)
-                       arg = CLOCK_TICK_RATE / arg;
+                       arg = PIT_TICK_RATE / arg;
                kd_mksound(arg, 0);
                break;
 
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                 */
                ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
                count = ticks ? (arg & 0xffff) : 0;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
                if (count)
-                       count = CLOCK_TICK_RATE / count;
+                       count = PIT_TICK_RATE / count;
                kd_mksound(count, ticks);
                break;
        }
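
Background for the division above: the KIOCSOUND argument is, for historical reasons, a raw i8253 divisor rather than a frequency, so the kernel recovers the frequency by dividing the advertised tick rate by it. Userspace computes that divisor against the historical PC constant, so the kernel must divide by the very same constant on every platform, which is exactly what PIT_TICK_RATE is. A standalone arithmetic check:

#include <stdio.h>

#define PIT_TICK_RATE 1193182	/* historical i8253 input clock, Hz */

int main(void)
{
	unsigned int divisor = PIT_TICK_RATE / 440;	    /* userspace passes 2711 */
	printf("freq = %u Hz\n", PIT_TICK_RATE / divisor);  /* kernel recovers 440   */
	return 0;
}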
index b42f42ca70c3c9454bb00ff7cf2e8505f74e482d..823559ab0e243610ca8e5ff3b5983aca3d53b7bb 100644 (file)
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
        return err;
 }
 
-static int sx150x_init_hw(struct sx150x_chip *chip,
-                       struct sx150x_platform_data *pdata)
+static int sx150x_reset(struct sx150x_chip *chip)
 {
-       int err = 0;
+       int err;
 
-       err = i2c_smbus_write_word_data(chip->client,
+       err = i2c_smbus_write_byte_data(chip->client,
                                        chip->dev_cfg->reg_reset,
-                                       0x3412);
+                                       0x12);
        if (err < 0)
                return err;
 
+       err = i2c_smbus_write_byte_data(chip->client,
+                                       chip->dev_cfg->reg_reset,
+                                       0x34);
+       return err;
+}
+
+static int sx150x_init_hw(struct sx150x_chip *chip,
+                       struct sx150x_platform_data *pdata)
+{
+       int err = 0;
+
+       if (pdata->reset_during_probe) {
+               err = sx150x_reset(chip);
+               if (err < 0)
+                       return err;
+       }
+
        err = sx150x_i2c_write(chip->client,
                        chip->dev_cfg->reg_misc,
                        0x01);
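
Two things changed above: the reset sequence became two separate byte writes (a single SMBus word transaction clocks both bytes out at once, which the expander does not appear to accept as the 0x12-then-0x34 sequence; that reading of the failure is an assumption), and the reset became opt-in so that state programmed by a bootloader is not wiped unconditionally. A hypothetical board file would enable it like this (gpio_base is an arbitrary example value):

static struct sx150x_platform_data board_expander_pdata = {
	.gpio_base          = 200,	/* example base */
	.reset_during_probe = true,	/* accept losing pre-probe state */
	/* ... remaining fields as the board requires ... */
};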
index 7580f55e67e3cf1560437b428d9fb1e5b8e411d0..36e95753223059ab0e1b5ed8490fe7364b39673e 100644 (file)
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+       AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+       AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
        { NULL, }
 /* Laptop models without axis info (yet):
  * "NC6910" "HP Compaq 6910"
index 8f0caf7d4482079ef45aa9ea3b8a66d6a37500d4..78fbe9ffe7f024f3f4e1ca486bcbeb5976087e27 100644 (file)
@@ -53,7 +53,7 @@
 #define T3_MAX_PBL_SIZE 256
 #define T3_MAX_RQ_SIZE 1024
 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 262144
+#define T3_MAX_CQ_DEPTH 65536
 #define T3_MAX_NUM_STAG (1<<15)
 #define T3_MAX_MR_SIZE 0x100000000ULL
 #define T3_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
index 443cea55daac5973469cf43fabe2a388abeadb55..61e0efd4ccfb5d9d4d6f6bdc50f765d365c07d0e 100644 (file)
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 static void nes_retrans_expired(struct nes_cm_node *cm_node)
 {
        struct iw_cm_id *cm_id = cm_node->cm_id;
-       switch (cm_node->state) {
+       enum nes_cm_node_state state = cm_node->state;
+       cm_node->state = NES_CM_STATE_CLOSED;
+       switch (state) {
        case NES_CM_STATE_SYN_RCVD:
        case NES_CM_STATE_CLOSING:
                rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
        case NES_CM_STATE_FIN_WAIT1:
                if (cm_node->cm_id)
                        cm_id->rem_ref(cm_id);
-               cm_node->state = NES_CM_STATE_CLOSED;
                send_reset(cm_node, NULL);
                break;
        default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
                break;
        case NES_CM_STATE_MPAREQ_RCVD:
                passive_state = atomic_add_return(1, &cm_node->passive_state);
-               if (passive_state ==  NES_SEND_RESET_EVENT)
-                       create_event(cm_node, NES_CM_EVENT_RESET);
-               cm_node->state = NES_CM_STATE_CLOSED;
                dev_kfree_skb_any(skb);
                break;
        case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
        case NES_CM_STATE_CLOSED:
                drop_packet(skb);
                break;
+       case NES_CM_STATE_FIN_WAIT2:
        case NES_CM_STATE_FIN_WAIT1:
        case NES_CM_STATE_LAST_ACK:
                cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                return -EINVAL;
        }
 
+       passive_state = atomic_add_return(1, &cm_node->passive_state);
+       if (passive_state == NES_SEND_RESET_EVENT) {
+               rem_ref_cm_node(cm_node->cm_core, cm_node);
+               return -ECONNRESET;
+       }
+
        /* associate the node with the QP */
        nesqp->cm_node = (void *)cm_node;
        cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
                        "ret=%d\n", __func__, __LINE__, ret);
 
-       passive_state = atomic_add_return(1, &cm_node->passive_state);
-       if (passive_state == NES_SEND_RESET_EVENT)
-               create_event(cm_node, NES_CM_EVENT_RESET);
        return 0;
 }
 
index f8233c851c694862d76861e515b93183b8d387a5..1980a461c49904e93102e02e655e5b033c5f92be 100644 (file)
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                return; /* Ignore it, wait for close complete */
 
                        if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+                               if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
+                                       (nesqp->ibqp_state == IB_QPS_RTS) &&
+                                       ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+                                       spin_lock_irqsave(&nesqp->lock, flags);
+                                       nesqp->hw_iwarp_state = iwarp_state;
+                                       nesqp->hw_tcp_state = tcp_state;
+                                       nesqp->last_aeq = async_event_id;
+                                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+                                       nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+                                       spin_unlock_irqrestore(&nesqp->lock, flags);
+                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+                                       nes_cm_disconn(nesqp);
+                               }
                                nesqp->cm_id->add_ref(nesqp->cm_id);
                                schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
                                                NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
                                                async_event_id, nesqp->last_aeq, tcp_state);
                        }
-
                        break;
                case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
                        if (nesqp->term_flags) {
index aa9183db32b104aaaa7bfad081c3c969699cec72..1204c3432b6322f23518c42d550746f19d9e4ce6 100644 (file)
@@ -45,6 +45,7 @@
 #define NES_PHY_TYPE_KR               9
 
 #define NES_MULTICAST_PF_MAX 8
+#define NES_A0 3
 
 enum pci_regs {
        NES_INT_STAT = 0x0000,
index 6dfdd49cdbcf36ef5cd68aee3caf46dbdaeb77f0..10560c796fd6c0ffc591601c610579d3e1b6e8ca 100644 (file)
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 0;
        } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
                u32temp = nes_read_indexed(nesdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 1;
        }
        if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
index a9b025f4147a0692845d2407b6efbd9220f9837e..ab6982056518e3c086c57738f360574b211108aa 100644 (file)
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
  * @dev: input device supporting MT events and finger tracking
  * @num_slots: number of slots used by the device
  *
- * This function allocates all necessary memory for MT slot handling
- * in the input device, and adds ABS_MT_SLOT to the device capabilities.
+ * This function allocates all necessary memory for MT slot handling in the
+ * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
  */
 int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
 {
+       int i;
+
        if (!num_slots)
                return 0;
 
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
        dev->mtsize = num_slots;
        input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
 
+       /* Mark slots as 'unused' */
+       for (i = 0; i < num_slots; i++)
+               dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
+
        return 0;
 }
 EXPORT_SYMBOL(input_mt_create_slots);
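
Initializing every tracking ID to -1 matches what drivers report when a contact lifts, so new slots start out looking released. A driver using the MT slots protocol frees a slot roughly like this (sketch):

input_mt_slot(dev, slot);			/* select the slot   */
input_report_abs(dev, ABS_MT_TRACKING_ID, -1);	/* mark it as unused */
input_sync(dev);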
index ea67c49146a3a03280ee8719c362c41d8033c743..b952317639116f2f18a7bc1f41ff5887c17f2a49 100644 (file)
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
                               const struct bcm5974_config *cfg,
                               const struct tp_finger *f)
 {
-       input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
-       input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
-       input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
-       input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+       input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+                        raw2int(f->force_major) << 1);
+       input_report_abs(input, ABS_MT_TOUCH_MINOR,
+                        raw2int(f->force_minor) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+                        raw2int(f->size_major) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MINOR,
+                        raw2int(f->size_minor) << 1);
        input_report_abs(input, ABS_MT_ORIENTATION,
                         MAX_FINGER_ORIENTATION - raw2int(f->orientation));
        input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
index 46e4ba0b92463184d5e398345e50ca68809628b2..f585131604806f531f91c48ebb867919339fd80f 100644 (file)
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
 
 static void __exit i8042_exit(void)
 {
-       platform_driver_unregister(&i8042_driver);
        platform_device_unregister(i8042_platform_device);
+       platform_driver_unregister(&i8042_driver);
        i8042_platform_exit();
 
        panic_blink = NULL;
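
The swap above restores the usual rule that teardown runs in the reverse order of setup, so the device disappears before the driver that serves it. The generic shape of the pattern (illustrative names, not the i8042 code):

static struct platform_device *example_dev;

static int __init example_init(void)
{
	int err = platform_driver_register(&example_driver);
	if (err)
		return err;
	example_dev = platform_device_register_simple("example", -1, NULL, 0);
	if (IS_ERR(example_dev)) {
		platform_driver_unregister(&example_driver);
		return PTR_ERR(example_dev);
	}
	return 0;
}

static void __exit example_exit(void)
{
	platform_device_unregister(example_dev);	/* device first... */
	platform_driver_unregister(&example_driver);	/* ...then driver  */
}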
index 40d77ba8fdc138ff98b0320b877358a5302d7a28..6e29badb969e44192be85080caf801851c3d06fa 100644 (file)
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
                        if (features->type == WACOM_G4 ||
                                        features->type == WACOM_MO) {
                                input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
-                               rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+                               rw = (data[7] & 0x04) - (data[7] & 0x03);
                        } else {
                                input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
-                               rw = -(signed)data[6];
+                               rw = -(signed char)data[6];
                        }
                        input_report_rel(input, REL_WHEEL, rw);
                }
index bd2755e8d9a3d327f7ae54bedd0e7820f85ea5fe..f332c52968b75d7528ee8c5f21eaf561a76d373d 100644 (file)
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
                goto err;
        }
 
-       err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid);
-
-       if (!err) {
+       if (ocr & R4_MEMORY_PRESENT
+           && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
                card->type = MMC_TYPE_SD_COMBO;
 
                if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
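
The rewritten test consults the R4 response instead of probing blindly for a CID: an SDIO-only card has no memory portion, so asking it for a CID can only fail. The flag being tested is the new define from this series (value per the SDIO spec, cited here from memory):

#define R4_MEMORY_PRESENT (1 << 27)	/* OCR reply: card also has SD memory */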
index 5f3a599ead07bbdfae11f7abc0198285e743db81..87226cd202a5086f7d90699f0a43d6d4e99725a1 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/clk.h>
 #include <linux/atmel_pdc.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 
 #include <linux/mmc/host.h>
 
index 9a68ff4353a2e83878fce5429afe9351140daf57..5a950b16d9e629dc3d08041bda547b165cadee76 100644 (file)
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
 
                while (delay--) {
                        reg = readw(host->base + MMC_REG_STATUS);
-                       if (reg & STATUS_CARD_BUS_CLK_RUN)
+                       if (reg & STATUS_CARD_BUS_CLK_RUN) {
                                /* Check twice before cut */
                                reg = readw(host->base + MMC_REG_STATUS);
                                if (reg & STATUS_CARD_BUS_CLK_RUN)
                                        return 0;
+                       }
 
                        if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
                                return 0;
index 4a8776f8afdd690048c69de91e755d35d2c884a5..4526d2791f2990229acbe9ef0f5c88286819807f 100644 (file)
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
        int ret = 0;
        struct platform_device *pdev = to_platform_device(dev);
        struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
-       pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
 
        if (host && host->suspended)
                return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
                        }
                }
                cancel_work_sync(&host->mmc_carddetect_work);
-               mmc_host_enable(host->mmc);
                ret = mmc_suspend_host(host->mmc);
+               mmc_host_enable(host->mmc);
                if (ret == 0) {
                        omap_hsmmc_disable_irq(host);
                        OMAP_HSMMC_WRITE(host->base, HCTL,
index 2e16e0a90a5e1a5d8d3d7487727baa39d701b8fb..976330de379ecc78cbe91f19c4bd9495d4722dae 100644 (file)
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
        host->pio_active        = XFER_NONE;
 
 #ifdef CONFIG_MMC_S3C_PIODMA
-       host->dodma             = host->pdata->dma;
+       host->dodma             = host->pdata->use_dma;
 #endif
 
        host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index ee7d0a5a51c496cb92b04e7b9bc8172f8a233875..69d98e3bf6abaa3c784d1d70f171387b3142eb64 100644 (file)
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
        struct mmc_data *data = host->data;
+       void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
                return;
        }
 
-       buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
-             host->sg_off);
+       sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+       buf = (unsigned short *)(sg_virt + host->sg_off);
 
        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 
        host->sg_off += count;
 
-       tmio_mmc_kunmap_atomic(host, &flags);
+       tmio_mmc_kunmap_atomic(sg_virt, &flags);
 
        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
index 64f7d5dfc106ac7b39e37842c5eca9a898dbbb06..0fedc78e3ea5c4613767d7d31e534143b4bf1780 100644 (file)
 
 #define ack_mmc_irqs(host, i) \
        do { \
-               u32 mask;\
-               mask  = sd_ctrl_read32((host), CTL_STATUS); \
-               mask &= ~((i) & TMIO_MASK_IRQ); \
-               sd_ctrl_write32((host), CTL_STATUS, mask); \
+               sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
        } while (0)
 
 
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
        return --host->sg_len;
 }
 
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
        unsigned long *flags)
 {
-       struct scatterlist *sg = host->sg_ptr;
-
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
 }
 
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
        unsigned long *flags)
 {
-       kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+       kunmap_atomic(virt, KM_BIO_SRC_IRQ);
        local_irq_restore(*flags);
 }
 
index a9352b2c7ac430d4e4aafac3d65a1b46005ea505..b7e755f4178ad885332ccaaeeb5eda492e6dfcfd 100644 (file)
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
 };
 
-
-static void end_sync(void)
-{
-       end_cpu_work();
-       /* make sure we don't leak task structs */
-       process_task_mortuary();
-       process_task_mortuary();
-}
-
-
 int sync_start(void)
 {
        int err;
@@ -158,7 +148,7 @@ int sync_start(void)
        if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
 
-       start_cpu_work();
+       mutex_lock(&buffer_mutex);
 
        err = task_handoff_register(&task_free_nb);
        if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
        if (err)
                goto out4;
 
+       start_cpu_work();
+
 out:
+       mutex_unlock(&buffer_mutex);
        return err;
 out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
 out2:
        task_handoff_unregister(&task_free_nb);
 out1:
-       end_sync();
        free_cpumask_var(marked_cpus);
        goto out;
 }
@@ -190,11 +182,20 @@ out1:
 
 void sync_stop(void)
 {
+       /* flush buffers */
+       mutex_lock(&buffer_mutex);
+       end_cpu_work();
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
-       end_sync();
+       mutex_unlock(&buffer_mutex);
+       flush_scheduled_work();
+
+       /* make sure we don't leak task structs */
+       process_task_mortuary();
+       process_task_mortuary();
+
        free_cpumask_var(marked_cpus);
 }
 
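
The ordering in sync_stop() is the crux of the fix, on the assumption (consistent with the hunks above) that the scheduled work ends up in sync_buffer(), which itself takes buffer_mutex; flushing while still holding the mutex would deadlock. The resulting shutdown sequence:

mutex_lock(&buffer_mutex);
end_cpu_work();			/* cancel pending per-CPU delayed work   */
/* ... unregister notifiers so nothing new is queued ... */
mutex_unlock(&buffer_mutex);
flush_scheduled_work();		/* safe: the work may take buffer_mutex  */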
index 219f79e2210a3fcd561b94456c4960a0a1fbacd9..f179ac2ea80149423034d66a90b1e81251c5069b 100644 (file)
@@ -120,8 +120,6 @@ void end_cpu_work(void)
 
                cancel_delayed_work(&b->work);
        }
-
-       flush_scheduled_work();
 }
 
 /*
index 72b2bcc2c22413b1a63e465e355ea65084ec7b8e..d4fb82d85e9b36ab61e1626236cb98bf76364ea2 100644 (file)
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
                enable_irq_wake(IRQ_RTC);
                bfin_rtc_sync_pending(&pdev->dev);
        } else
-               bfin_rtc_int_clear(-1);
+               bfin_rtc_int_clear(0);
 
        return 0;
 }
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
 {
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(IRQ_RTC);
-       else
-               bfin_write_RTC_ISTAT(-1);
+
+       /*
+        * Since only some of the RTC bits are maintained externally in the
+        * Vbat domain, we need to wait for the RTC MMRs to be synced into
+        * the core after waking up.  This happens at the RTC 1 Hz event.  Once that
+        * has happened, we can go ahead and re-enable the important write
+        * complete interrupt event.
+        */
+       while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
+               continue;
+       bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
 
        return 0;
 }
index 66377f3e28b851eaa908c6057a9646a639e9c229..d60557cae8ef4fdadadb10b343125f174cb7508d 100644 (file)
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        t->time.tm_isdst = -1;
        t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
        t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
-       return rtc_valid_tm(t);
+       return 0;
 }
 
 static struct rtc_class_ops m41t80_rtc_ops = {
index 6c418fe7f288ae2deaa9f44080a749a9eaafad88..b7a6690e5b35e8744295bf212a8e0d75e0d8dd6f 100644 (file)
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
        }
 
        if (request_irq(adev->irq[0], pl031_interrupt,
-                       IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) {
+                       IRQF_DISABLED, "rtc-pl031", ldata)) {
                ret = -EIO;
                goto out_no_irq;
        }
index c91a7f70f7b086f56882ba07f97ba38e314adfd6..5d786bd3e304f1a174e682461d7d94a265ae2119 100644 (file)
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-static int __init pxa168fb_init_mode(struct fb_info *info,
+static int __devinit pxa168fb_init_mode(struct fb_info *info,
                              struct pxa168fb_mach_info *mi)
 {
        struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
        return ret;
 }
 
-static int __init pxa168fb_probe(struct platform_device *pdev)
+static int __devinit pxa168fb_probe(struct platform_device *pdev)
 {
        struct pxa168fb_mach_info *mi;
        struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
        .probe          = pxa168fb_probe,
 };
 
-static int __devinit pxa168fb_init(void)
+static int __init pxa168fb_init(void)
 {
        return platform_driver_register(&pxa168fb_driver);
 }
index a7528b91393676bb1f1affa72f6b78f38206d4c5..fd0cc0bf9a40396a150ad77b69bf5284531d6125 100644 (file)
@@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void)
 {
        int err = register_filesystem(&bm_fs_type);
        if (!err) {
-               err = register_binfmt(&misc_format);
+               err = insert_binfmt(&misc_format);
                if (err)
                        unregister_filesystem(&bm_fs_type);
        }
index 51f270b479b6938a4a730ea56f9011c30f99563f..48d74c7391d13f4f07393c45d19825e937ecbcd1 100644 (file)
@@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio)
        int ret = 0;
 
        if (dio->bio) {
-               loff_t cur_offset = dio->block_in_file << dio->blkbits;
+               loff_t cur_offset = dio->cur_page_fs_offset;
                loff_t bio_next_offset = dio->logical_offset_in_bio +
                        dio->bio->bi_size;
 
@@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio)
                 * Submit now if the underlying fs is about to perform a
                 * metadata read
                 */
-               if (dio->boundary)
+               else if (dio->boundary)
                        dio_bio_submit(dio);
        }
 
index 6769fd0f35b88373fdb8d0b265668a976a7ab251..f8cc34f542c3a1cc8b4f53309ffae317ecdddf15 100644 (file)
@@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync);
 
 static int __init fcntl_init(void)
 {
-       /* please add new bits here to ensure allocation uniqueness */
-       BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       /*
+        * Please add new bits here to ensure allocation uniqueness.
+        * Exceptions: O_NONBLOCK is a two-bit define on parisc; O_NDELAY
+        * is defined as O_NONBLOCK on some platforms and not on others.
+        */
+       BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
-               O_TRUNC         | O_APPEND      | O_NONBLOCK    |
+               O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
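
The check works because OR-ing genuinely distinct single-bit flags yields exactly one set bit per flag; any reused bit collapses the count and trips the BUILD_BUG_ON at compile time. A standalone model (GCC's popcount standing in for HWEIGHT32):

#include <assert.h>

int main(void)
{
	unsigned int flags = 0x1 | 0x2 | 0x4;	/* three distinct flags */
	assert(__builtin_popcount(flags) == 3);	/* uniqueness holds     */

	flags |= 0x4;				/* a reused bit...      */
	assert(__builtin_popcount(flags) == 3);	/* ...stays 3, not 4    */
	return 0;
}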
index 69ad053ffd78cb0f2669516b5327571f37d65254..d367af1514efe696b50a73374fffe77784c65b6a 100644 (file)
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
  * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
@@ -306,8 +306,8 @@ __releases(&fc->lock)
 
 static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (signal_pending(current))
                return;
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc)
 
 /* Wait until a request is available on the pending list */
 static void request_wait(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        DECLARE_WAITQUEUE(wait, current);
 
@@ -934,7 +934,7 @@ __acquires(&fc->lock)
  */
 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
  * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(head)) {
                struct fuse_req *req;
@@ -1744,8 +1744,8 @@ __acquires(&fc->lock)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
@@ -1769,6 +1769,16 @@ __acquires(&fc->lock)
        }
 }
 
+static void end_queued_requests(struct fuse_conn *fc)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+       fc->max_background = UINT_MAX;
+       flush_bg_queue(fc);
+       end_requests(fc, &fc->pending);
+       end_requests(fc, &fc->processing);
+}
+
 /*
  * Abort all requests.
  *
@@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
                fc->connected = 0;
                fc->blocked = 0;
                end_io_requests(fc);
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               end_queued_requests(fc);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file)
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               fc->blocked = 0;
+               end_queued_requests(fc);
+               wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }
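
All of these hunks make the same mechanical change: sparse rejects lock annotations written as __releases(&foo->lock), so the '&' is dropped while the code itself still takes the address. The annotated shape, for reference (minimal sketch):

static void drop_lock_and_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	spin_unlock(&fc->lock);	/* the lock expression keeps its '&' here */
	/* ... wait for the answer ... */
	spin_lock(&fc->lock);
}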
index 147c1f71bdb9f0213307fd3e63f6e3b30fc3f403..c8224587123f6e2ff84c8933f8d56a50ffd06c80 100644 (file)
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 
 /* Called under fc->lock, may release and reacquire it */
 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_inode *fi = get_fuse_inode(req->inode);
        loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@ __acquires(&fc->lock)
  * Called with fc->lock
  */
 void fuse_flush_writepages(struct inode *inode)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
index e20ee85955d1c77c3a410da2c82893cd38acce8e..f3f3578393a417085812ba1ad7e0b7c1e4e5c981 100644 (file)
@@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
 
        inode_inc_link_count(dir);
 
-       inode = minix_new_inode(dir, mode, &err);
+       inode = minix_new_inode(dir, S_IFDIR | mode, &err);
        if (!inode)
                goto out_dir;
 
index 3dfef062396845d2b45cc42a22064ec4402ee05f..cf0d2ffb3c84a149bc904323cd53599620c8c917 100644 (file)
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
 
 static int nfs4_access_to_omode(u32 access)
 {
-       switch (access) {
+       switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
index 215e12ce1d85e2079359838cf287f1c3c670ff31..592fae5007d1245baade87453ce731121aa6efe5 100644 (file)
@@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
        last_page_bytes = PAGE_ALIGN(end);
        index = start >> PAGE_CACHE_SHIFT;
        do {
-               pages[numpages] = grab_cache_page(mapping, index);
+               pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
                if (!pages[numpages]) {
                        ret = -ENOMEM;
                        mlog_errno(ret);
index ec6d123395932b69b6a67ba0b5256be02b811c6f..c7ee03c22226253d970cce94beb11f6353b3e1d0 100644 (file)
@@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
 
        ocfs2_blockcheck_inc_failure(stats);
        mlog(ML_ERROR,
-            "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
+            "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        /* Ok, try ECC fixups */
@@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
                goto out;
        }
 
-       mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
+       mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        rc = -EIO;
index 81296b4e364632dd5936f59d8adeab9832f2d2fd..9a03c151b5ceabc169215d99777abb92e2d2ad36 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/writeback.h>
 #include <linux/falloc.h>
 #include <linux/quotaops.h>
+#include <linux/blkdev.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync)
        if (err)
                goto bail;
 
-       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
+               /*
+                * We still have to flush drive's caches to get data to the
+                * platter
+                */
+               if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
+                       blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
+                                          NULL, BLKDEV_IFL_WAIT);
                goto bail;
+       }
 
        journal = osb->journal->j_journal;
        err = jbd2_journal_force_commit(journal);
@@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));
 
-       page = grab_cache_page(mapping, index);
+       page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
@@ -2329,7 +2338,7 @@ out_dio:
        BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
        if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
-           ((file->f_flags & O_DIRECT) && has_refcount)) {
+           ((file->f_flags & O_DIRECT) && !direct_io)) {
                ret = filemap_fdatawrite_range(file->f_mapping, pos,
                                               pos + count - 1);
                if (ret < 0)
index 0492464916b19324e73425e29c473956b0b4bd33..eece3e05d9d0124d04b81c940c1289f876e700bb 100644 (file)
@@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode,
                                                     OCFS2_BH_IGNORE_CACHE);
        } else {
                status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
-               if (!status)
+               /*
+                * If buffer is in jbd, then its checksum may not have been
+                * computed as yet.
+                */
+               if (!status && !buffer_jbd(bh))
                        status = ocfs2_validate_inode_block(osb->sb, bh);
        }
        if (status < 0) {
index af2b8fe1f13999e26f6e2543a047847bcf517c4e..4c18f4ad93b43cae6e5ddcd5a2781fbc79292484 100644 (file)
@@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
        /*
         * Another node might have truncated while we were waiting on
         * cluster locks.
+        * We don't check size == 0 before the shift. This is borrowed
+        * from do_generic_file_read.
         */
-       last_index = size >> PAGE_CACHE_SHIFT;
-       if (page->index > last_index) {
+       last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+       if (unlikely(!size || page->index > last_index)) {
                ret = -EINVAL;
                goto out;
        }
@@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
         * because the "write" would invalidate their data.
         */
        if (page->index == last_index)
-               len = size & ~PAGE_CACHE_MASK;
+               len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
 
        ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
                                       &fsdata, di_bh, page);
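
The off-by-one is easiest to see with a size that is exactly page-aligned. Worked through with PAGE_CACHE_SIZE == 4096 (shift 12):

size = 4096 (one full page of data; the only valid page index is 0)

old: last_index = 4096 >> 12              = 1		/* page 1 is wholly past EOF */
     len        = 4096 & 4095             = 0		/* zero-length write region  */
new: last_index = (4096 - 1) >> 12        = 0		/* the real last page        */
     len        = ((4096 - 1) & 4095) + 1 = 4096	/* the whole page            */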
index f171b51a74f78d6e268b5d743a24df4e702f9643..a00dda2e4f16698e5c7d8e0946ba1f09506651c6 100644 (file)
@@ -472,32 +472,23 @@ leave:
        return status;
 }
 
-static int ocfs2_mknod_locked(struct ocfs2_super *osb,
-                             struct inode *dir,
-                             struct inode *inode,
-                             dev_t dev,
-                             struct buffer_head **new_fe_bh,
-                             struct buffer_head *parent_fe_bh,
-                             handle_t *handle,
-                             struct ocfs2_alloc_context *inode_ac)
+static int __ocfs2_mknod_locked(struct inode *dir,
+                               struct inode *inode,
+                               dev_t dev,
+                               struct buffer_head **new_fe_bh,
+                               struct buffer_head *parent_fe_bh,
+                               handle_t *handle,
+                               struct ocfs2_alloc_context *inode_ac,
+                               u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
 {
        int status = 0;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_extent_list *fel;
-       u64 suballoc_loc, fe_blkno = 0;
-       u16 suballoc_bit;
        u16 feat;
 
        *new_fe_bh = NULL;
 
-       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
-                                      inode_ac, &suballoc_loc,
-                                      &suballoc_bit, &fe_blkno);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
        /* populate as many fields early on as possible - many of
         * these are used by the support functions here and in
         * callers. */
@@ -591,6 +582,34 @@ leave:
        return status;
 }
 
+static int ocfs2_mknod_locked(struct ocfs2_super *osb,
+                             struct inode *dir,
+                             struct inode *inode,
+                             dev_t dev,
+                             struct buffer_head **new_fe_bh,
+                             struct buffer_head *parent_fe_bh,
+                             handle_t *handle,
+                             struct ocfs2_alloc_context *inode_ac)
+{
+       int status = 0;
+       u64 suballoc_loc, fe_blkno = 0;
+       u16 suballoc_bit;
+
+       *new_fe_bh = NULL;
+
+       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
+                                      inode_ac, &suballoc_loc,
+                                      &suballoc_bit, &fe_blkno);
+       if (status < 0) {
+               mlog_errno(status);
+               return status;
+       }
+
+       return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+                                   parent_fe_bh, handle, inode_ac,
+                                   fe_blkno, suballoc_loc, suballoc_bit);
+}
+
 static int ocfs2_mkdir(struct inode *dir,
                       struct dentry *dentry,
                       int mode)
@@ -1852,61 +1871,117 @@ bail:
        return status;
 }
 
-static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
-                                   struct inode **ret_orphan_dir,
-                                   u64 blkno,
-                                   char *name,
-                                   struct ocfs2_dir_lookup_result *lookup)
+static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
+                                       struct inode **ret_orphan_dir,
+                                       struct buffer_head **ret_orphan_dir_bh)
 {
        struct inode *orphan_dir_inode;
        struct buffer_head *orphan_dir_bh = NULL;
-       int status = 0;
-
-       status = ocfs2_blkno_stringify(blkno, name);
-       if (status < 0) {
-               mlog_errno(status);
-               return status;
-       }
+       int ret = 0;
 
        orphan_dir_inode = ocfs2_get_system_file_inode(osb,
                                                       ORPHAN_DIR_SYSTEM_INODE,
                                                       osb->slot_num);
        if (!orphan_dir_inode) {
-               status = -ENOENT;
-               mlog_errno(status);
-               return status;
+               ret = -ENOENT;
+               mlog_errno(ret);
+               return ret;
        }
 
        mutex_lock(&orphan_dir_inode->i_mutex);
 
-       status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
+       ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+       if (ret < 0) {
+               mutex_unlock(&orphan_dir_inode->i_mutex);
+               iput(orphan_dir_inode);
+
+               mlog_errno(ret);
+               return ret;
        }
 
-       status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
-                                             orphan_dir_bh, name,
-                                             OCFS2_ORPHAN_NAMELEN, lookup);
-       if (status < 0) {
-               ocfs2_inode_unlock(orphan_dir_inode, 1);
+       *ret_orphan_dir = orphan_dir_inode;
+       *ret_orphan_dir_bh = orphan_dir_bh;
 
-               mlog_errno(status);
-               goto leave;
+       return 0;
+}
+
+static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
+                                     struct buffer_head *orphan_dir_bh,
+                                     u64 blkno,
+                                     char *name,
+                                     struct ocfs2_dir_lookup_result *lookup)
+{
+       int ret;
+       struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
+
+       ret = ocfs2_blkno_stringify(blkno, name);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
+                                          orphan_dir_bh, name,
+                                          OCFS2_ORPHAN_NAMELEN, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
+ * insertion of an orphan.
+ * @osb: ocfs2 file system
+ * @ret_orphan_dir: Orphan dir inode - returned locked!
+ * @blkno: Actual block number of the inode to be inserted into orphan dir.
+ * @lookup: dir lookup result, to be passed back into functions like
+ *          ocfs2_orphan_add
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure. 
+ */
+static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
+                                   struct inode **ret_orphan_dir,
+                                   u64 blkno,
+                                   char *name,
+                                   struct ocfs2_dir_lookup_result *lookup)
+{
+       struct inode *orphan_dir_inode = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       int ret = 0;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
+                                          &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
+                                        blkno, name, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
        }
 
        *ret_orphan_dir = orphan_dir_inode;
 
-leave:
-       if (status) {
+out:
+       brelse(orphan_dir_bh);
+
+       if (ret) {
+               ocfs2_inode_unlock(orphan_dir_inode, 1);
                mutex_unlock(&orphan_dir_inode->i_mutex);
                iput(orphan_dir_inode);
        }
 
-       brelse(orphan_dir_bh);
-
-       mlog_exit(status);
-       return status;
+       mlog_exit(ret);
+       return ret;
 }
 
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
@@ -2053,6 +2128,99 @@ leave:
        return status;
 }
 
+/**
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
+ * allocated file. This is different from the typical 'add to orphan dir'
+ * operation in that the inode does not yet exist. This is a problem because
+ * the orphan dir stringifies the inode block number to come up with its
+ * dirent. Obviously if the inode does not yet exist we have a chicken-and-egg
+ * problem. This function works around it by calling deeper into the orphan
+ * and suballoc code than other callers. Use this only when necessary.
+ * @dir: The directory which this inode will ultimately wind up under - not the
+ * orphan dir!
+ * @dir_bh: buffer_head of the @dir inode block
+ * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
+ * with the string to be used for orphan dirent. Pass back to the orphan dir
+ * code.
+ * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
+ * dir code.
+ * @ret_di_blkno: block number where the new inode will be allocated.
+ * @orphan_insert: Dir insert context to be passed back into orphan dir code.
+ * @ret_inode_ac: Inode alloc context to be passed back to the allocator.
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure. 
+ */
+static int ocfs2_prep_new_orphaned_file(struct inode *dir,
+                                       struct buffer_head *dir_bh,
+                                       char *orphan_name,
+                                       struct inode **ret_orphan_dir,
+                                       u64 *ret_di_blkno,
+                                       struct ocfs2_dir_lookup_result *orphan_insert,
+                                       struct ocfs2_alloc_context **ret_inode_ac)
+{
+       int ret;
+       u64 di_blkno;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+       struct inode *orphan_dir = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       struct ocfs2_alloc_context *inode_ac = NULL;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       /* reserve an inode spot */
+       ret = ocfs2_reserve_new_inode(osb, &inode_ac);
+       if (ret < 0) {
+               if (ret != -ENOSPC)
+                       mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
+                                      &di_blkno);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
+                                        di_blkno, orphan_name, orphan_insert);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+out:
+       if (ret == 0) {
+               *ret_orphan_dir = orphan_dir;
+               *ret_di_blkno = di_blkno;
+               *ret_inode_ac = inode_ac;
+               /*
+                * orphan_name and orphan_insert are already up to
+                * date via prepare_orphan_dir
+                */
+       } else {
+               /* Unroll reserve_new_inode* */
+               if (inode_ac)
+                       ocfs2_free_alloc_context(inode_ac);
+
+               /* Unroll orphan dir locking */
+               mutex_unlock(&orphan_dir->i_mutex);
+               ocfs2_inode_unlock(orphan_dir, 1);
+               iput(orphan_dir);
+       }
+
+       brelse(orphan_dir_bh);
+
+       return ret;
+}
+
 int ocfs2_create_inode_in_orphan(struct inode *dir,
                                 int mode,
                                 struct inode **new_inode)
@@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
        struct buffer_head *new_di_bh = NULL;
        struct ocfs2_alloc_context *inode_ac = NULL;
        struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+       u64 uninitialized_var(di_blkno), suballoc_loc;
+       u16 suballoc_bit;
 
        status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
        if (status < 0) {
@@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                return status;
        }
 
-       /*
-        * We give the orphan dir the root blkno to fake an orphan name,
-        * and allocate enough space for our insertion.
-        */
-       status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
-                                         osb->root_blkno,
-                                         orphan_name, &orphan_insert);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
-       /* reserve an inode spot */
-       status = ocfs2_reserve_new_inode(osb, &inode_ac);
+       status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
+                                             orphan_name, &orphan_dir,
+                                             &di_blkno, &orphan_insert, &inode_ac);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                goto leave;
        did_quota_inode = 1;
 
-       inode->i_nlink = 0;
-       /* do the real work now. */
-       status = ocfs2_mknod_locked(osb, dir, inode,
-                                   0, &new_di_bh, parent_di_bh, handle,
-                                   inode_ac);
+       status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
+                                             &suballoc_loc,
+                                             &suballoc_bit, di_blkno);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
 
-       status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name);
+       inode->i_nlink = 0;
+       /* do the real work now. */
+       status = __ocfs2_mknod_locked(dir, inode,
+                                     0, &new_di_bh, parent_di_bh, handle,
+                                     inode_ac, di_blkno, suballoc_loc,
+                                     suballoc_bit);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
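
Taken together, the namei.c hunks above reorder ocfs2_create_inode_in_orphan() so that the inode's disk block is chosen before the orphan dirent is prepared, and only claimed afterwards. A condensed sketch of the resulting call sequence, paraphrasing the hunks (error handling, journal start and quota steps elided; not compilable as-is):

    /* 1. Before the transaction: lock the orphan dir, reserve an inode,
     *    pick its block, and precompute the orphan dirent. */
    status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh, orphan_name,
                                          &orphan_dir, &di_blkno,
                                          &orphan_insert, &inode_ac);

    /* 2. Inside the transaction: claim the suballocator bits at the
     *    previously chosen location... */
    status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
                                          &suballoc_loc, &suballoc_bit,
                                          di_blkno);

    /* 3. ...and initialize the on-disk inode at that block. */
    status = __ocfs2_mknod_locked(dir, inode, 0, &new_di_bh, parent_di_bh,
                                  handle, inode_ac, di_blkno,
                                  suballoc_loc, suballoc_bit);
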
index 73a11ccfd4c280681abe672c5e9cd81e3b229a93..0afeda83120fa0e54bd2c9cc7e1f4f08c6e836bf 100644 (file)
@@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (map_end & (PAGE_CACHE_SIZE - 1))
                        to = map_end & (PAGE_CACHE_SIZE - 1);
 
-               page = grab_cache_page(mapping, page_index);
+               page = find_or_create_page(mapping, page_index, GFP_NOFS);
 
                /*
                 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page
@@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
                if (map_end > end)
                        map_end = end;
 
-               page = grab_cache_page(context->inode->i_mapping, page_index);
+               page = find_or_create_page(context->inode->i_mapping,
+                                          page_index, GFP_NOFS);
                BUG_ON(!page);
 
                wait_on_page_writeback(page);
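
Both refcounttree.c hunks make the same substitution. In this era grab_cache_page() is a thin wrapper that passes the mapping's default gfp mask, which may include __GFP_FS; spelling out find_or_create_page() with GFP_NOFS keeps the page-cache allocation from re-entering the filesystem while the CoW operation is in progress. For reference, the wrapper being bypassed (include/linux/pagemap.h of this period):

    static inline struct page *grab_cache_page(struct address_space *mapping,
                                               pgoff_t index)
    {
            return find_or_create_page(mapping, index,
                                       mapping_gfp_mask(mapping));
    }
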
index a8e6a95a353f03dcb8a34cf928ded84ff7e6d127..8a286f54dca1f30a1d65b0d6cbb5baa0fa8c063c 100644 (file)
@@ -57,11 +57,28 @@ struct ocfs2_suballoc_result {
        u64             sr_bg_blkno;    /* The bg we allocated from.  Set
                                           to 0 when a block group is
                                           contiguous. */
+       u64             sr_bg_stable_blkno; /*
+                                            * Doesn't change, always
+                                            * set to target block
+                                            * group descriptor
+                                            * block.
+                                            */
        u64             sr_blkno;       /* The first allocated block */
        unsigned int    sr_bit_offset;  /* The bit in the bg */
        unsigned int    sr_bits;        /* How many bits we claimed */
 };
 
+static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
+{
+       if (res->sr_blkno == 0)
+               return 0;
+
+       if (res->sr_bg_blkno)
+               return res->sr_bg_blkno;
+
+       return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
+}
+
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
        brelse(ac->ac_bh);
        ac->ac_bh = NULL;
        ac->ac_resv = NULL;
+       if (ac->ac_find_loc_priv) {
+               kfree(ac->ac_find_loc_priv);
+               ac->ac_find_loc_priv = NULL;
+       }
 }
 
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (!ret)
                ocfs2_bg_discontig_fix_result(ac, gd, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
+
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
        ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
                                               res->sr_bits,
                                               le16_to_cpu(gd->bg_chain));
@@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (ret < 0)
                mlog_errno(ret);
 
+out_loc_only:
        *bits_left = le16_to_cpu(gd->bg_free_bits_count);
 
 out:
@@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
 {
        int status;
        u16 chain;
-       u32 tmp_used;
        u64 next_group;
        struct inode *alloc_inode = ac->ac_inode;
        struct buffer_head *group_bh = NULL;
@@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        if (!status)
                ocfs2_bg_discontig_fix_result(ac, bg, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
 
        /*
         * Keep track of previous block descriptor read. When
@@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
                }
        }
 
-       /* Ok, claim our bits now: set the info on dinode, chainlist
-        * and then the group */
-       status = ocfs2_journal_access_di(handle,
-                                        INODE_CACHE(alloc_inode),
-                                        ac->ac_bh,
-                                        OCFS2_JOURNAL_ACCESS_WRITE);
-       if (status < 0) {
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
+       status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
+                                                 ac->ac_bh, res->sr_bits,
+                                                 chain);
+       if (status) {
                mlog_errno(status);
                goto bail;
        }
 
-       tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
-       fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
-       le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
-       ocfs2_journal_dirty(handle, ac->ac_bh);
-
        status = ocfs2_block_group_set_bits(handle,
                                            alloc_inode,
                                            bg,
@@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
             (unsigned long long)le64_to_cpu(fe->i_blkno));
 
+out_loc_only:
        *bits_left = le16_to_cpu(bg->bg_free_bits_count);
 bail:
        brelse(group_bh);
@@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
        int status;
        u16 victim, i;
        u16 bits_left = 0;
+       u64 hint = ac->ac_last_group;
        struct ocfs2_chain_list *cl;
        struct ocfs2_dinode *fe;
 
@@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                goto bail;
        }
 
-       res->sr_bg_blkno = ac->ac_last_group;
+       res->sr_bg_blkno = hint;
        if (res->sr_bg_blkno) {
                /* Attempt to short-circuit the usual search mechanism
                 * by jumping straight to the most recently used
@@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
 
        status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                    res, &bits_left);
-       if (!status)
+       if (!status) {
+               hint = ocfs2_group_from_res(res);
                goto set_hint;
+       }
        if (status < 0 && status != -ENOSPC) {
                mlog_errno(status);
                goto bail;
@@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                ac->ac_chain = i;
                status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                            res, &bits_left);
-               if (!status)
+               if (!status) {
+                       hint = ocfs2_group_from_res(res);
                        break;
+               }
                if (status < 0 && status != -ENOSPC) {
                        mlog_errno(status);
                        goto bail;
@@ -1936,7 +1972,7 @@ set_hint:
                if (bits_left < min_bits)
                        ac->ac_last_group = 0;
                else
-                       ac->ac_last_group = res->sr_bg_blkno;
+                       ac->ac_last_group = hint;
        }
 
 bail:
@@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir,
        OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
 }
 
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno)
+{
+       int ret;
+       handle_t *handle = NULL;
+       struct ocfs2_suballoc_result *res;
+
+       BUG_ON(!ac);
+       BUG_ON(ac->ac_bits_given != 0);
+       BUG_ON(ac->ac_bits_wanted != 1);
+       BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
+
+       res = kzalloc(sizeof(*res), GFP_NOFS);
+       if (res == NULL) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
+
+       /*
+        * The handle started here is for chain relink. Alternatively,
+        * we could just disable relink for these calls.
+        */
+       handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               handle = NULL;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       /*
+        * This will instruct ocfs2_claim_suballoc_bits and
+        * ocfs2_search_one_group to search, but to defer the actual
+        * allocation until later.
+        */
+       ac->ac_find_loc_only = 1;
+
+       ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ac->ac_find_loc_priv = res;
+       *fe_blkno = res->sr_blkno;
+
+out:
+       if (handle)
+               ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
+
+       if (ret)
+               kfree(res);
+
+       return ret;
+}
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno)
+{
+       int ret;
+       u16 chain;
+       struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
+       struct buffer_head *bg_bh = NULL;
+       struct ocfs2_group_desc *bg;
+       struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+
+       /*
+        * Since di_blkno is being passed back in, we check for any
+        * inconsistencies which may have happened between
+        * calls. Any mismatch is a code bug, as di_blkno is not expected
+        * to change once returned from ocfs2_find_new_inode_loc().
+        */
+       BUG_ON(res->sr_blkno != di_blkno);
+
+       ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
+                                         res->sr_bg_stable_blkno, &bg_bh);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+       chain = le16_to_cpu(bg->bg_chain);
+
+       ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
+                                              ac->ac_bh, res->sr_bits,
+                                              chain);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_block_group_set_bits(handle,
+                                        ac->ac_inode,
+                                        bg,
+                                        bg_bh,
+                                        res->sr_bit_offset,
+                                        res->sr_bits);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
+            (unsigned long long)di_blkno);
+
+       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+       BUG_ON(res->sr_bits != 1);
+
+       *suballoc_loc = res->sr_bg_blkno;
+       *suballoc_bit = res->sr_bit_offset;
+       ac->ac_bits_given++;
+       ocfs2_save_inode_ac_group(dir, ac);
+
+out:
+       brelse(bg_bh);
+
+       return ret;
+}
+
 int ocfs2_claim_new_inode(handle_t *handle,
                          struct inode *dir,
                          struct buffer_head *parent_fe_bh,
@@ -2567,7 +2733,8 @@ out:
  * suballoc_bit.
  */
 static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
-                                      u16 *suballoc_slot, u16 *suballoc_bit)
+                                      u16 *suballoc_slot, u64 *group_blkno,
+                                      u16 *suballoc_bit)
 {
        int status;
        struct buffer_head *inode_bh = NULL;
@@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
                *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
        if (suballoc_bit)
                *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
+       if (group_blkno)
+               *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
 
 bail:
        brelse(inode_bh);
@@ -2621,7 +2790,8 @@ bail:
  */
 static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                                   struct inode *suballoc,
-                                  struct buffer_head *alloc_bh, u64 blkno,
+                                  struct buffer_head *alloc_bh,
+                                  u64 group_blkno, u64 blkno,
                                   u16 bit, int *res)
 {
        struct ocfs2_dinode *alloc_di;
@@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                goto bail;
        }
 
-       if (alloc_di->i_suballoc_loc)
-               bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc);
-       else
-               bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
+       bg_blkno = group_blkno ? group_blkno :
+                  ocfs2_which_suballoc_group(blkno, bit);
        status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
                                             &group_bh);
        if (status < 0) {
@@ -2680,6 +2848,7 @@ bail:
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
 {
        int status;
+       u64 group_blkno = 0;
        u16 suballoc_bit = 0, suballoc_slot = 0;
        struct inode *inode_alloc_inode;
        struct buffer_head *alloc_bh = NULL;
@@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        mlog_entry("blkno: %llu", (unsigned long long)blkno);
 
        status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
-                                            &suballoc_bit);
+                                            &group_blkno, &suballoc_bit);
        if (status < 0) {
                mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
                goto bail;
@@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        }
 
        status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
-                                        blkno, suballoc_bit, res);
+                                        group_blkno, blkno, suballoc_bit, res);
        if (status < 0)
                mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
 
index a017dd3ee7d9ce2d6c0429d090b57ac077ed585a..b8afabfeede4c43694bdb8bf0a9664b0befd24a6 100644 (file)
@@ -56,6 +56,9 @@ struct ocfs2_alloc_context {
        u64    ac_max_block;  /* Highest block number to allocate. 0 is
                                 the same as ~0 - unlimited */
 
+       int    ac_find_loc_only;  /* hack for reflink operation ordering */
+       struct ocfs2_suballoc_result *ac_find_loc_priv; /* saved find_loc result */
+
        struct ocfs2_alloc_reservation  *ac_resv;
 };
 
@@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et,
                          struct ocfs2_alloc_context **meta_ac);
 
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res);
+
+/*
+ * The following two interfaces are for ocfs2_create_inode_in_orphan().
+ */
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno);
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno);
+
 #endif /* _CHAINALLOC_H_ */
index 180cf5a0bd67119218c170b265cf944c103b889b..3b8b456603318f1ff017056cdeda40129d559ab4 100644 (file)
@@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page)
        u |= kpf_copy_bit(k, KPF_HWPOISON,      PG_hwpoison);
 #endif
 
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
 #endif
 
index 439fc1f1c1c41487ad76d23523d995a9f416b926..271afc48b9a5d58dd2874d41f8a08dfcae644481 100644 (file)
@@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
        if (vma->vm_flags & VM_GROWSDOWN)
-               start += PAGE_SIZE;
+               if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
+                       start += PAGE_SIZE;
 
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
index c7376bf80b0604bf8f8b9394989178d6de45ecc2..8ca18e26d7e39fe429a8179d48f2f9f17f58a589 100644 (file)
  * While the GPIO programming interface defines valid GPIO numbers
  * to be in the range 0..MAX_INT, this library restricts them to the
  * smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
  */
 
 #ifndef ARCH_NR_GPIOS
 #define ARCH_NR_GPIOS          256
 #endif
 
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request().  Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
 static inline int gpio_is_valid(int number)
 {
-       /* only some non-negative numbers are valid */
        return ((unsigned)number) < ARCH_NR_GPIOS;
 }
 
index ed3e92e41c6e5683ad3dbb823e48259f5150ac33..0c991023ee475fad85136a04b00cb8d2699a382b 100644 (file)
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+       return cgroup_attach_task_all(current, tsk);
+}
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }
 
 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+                                        struct task_struct *t)
+{
+       return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
        return 0;
index ee3049cb9ba5782e27447d3984a914a2d39ae87b..52baa79d69a763f7f94f728b115fe0337190addd 100644 (file)
@@ -63,6 +63,9 @@
  *            IRQ lines will appear.  Similarly to gpio_base, the expander
  *            will create a block of irqs beginning at this number.
  *            This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ *                      reset of the chip at the beginning of the probe
+ *                      in order to place it in a known state.
  */
 struct sx150x_platform_data {
        unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
        u16      io_polarity;
        int      irq_summary;
        unsigned irq_base;
+       bool     reset_during_probe;
 };
 
 #endif /* __LINUX_I2C_SX150X_H */
index 0a6b3d5c490ccfcd3ab9a3dbdba9b41f88e1c4b8..7fb59279373823339f6fdda86158952e3e6fac45 100644 (file)
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
        iounmap_atomic(vaddr, slot);
 }
 
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
        resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
        iounmap(vaddr);
 }
@@ -125,38 +125,38 @@ struct io_mapping;
 static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
-       return (struct io_mapping *) ioremap_wc(base, size);
+       return (struct io_mapping __force *) ioremap_wc(base, size);
 }
 
 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
-       iounmap(mapping);
+       iounmap((void __force __iomem *) mapping);
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 }
 
 /* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 }
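
These io-mapping.h hunks change no behavior; they add sparse address-space annotations. Mapped addresses now carry __iomem in both the HAVE_ATOMIC_IOMAP and fallback variants, and the fallback's pointer-punning casts are marked __force to tell sparse the conversion is intentional. What sparse would flag without them, roughly (kernel context, sparse-enabled build assumed):

    void __iomem *vaddr = io_mapping_map_wc(mapping, offset);

    u32 *p = (u32 *)vaddr;  /* sparse warns: cast removes address space */
    u32 v  = readl(vaddr);  /* clean: readl() takes a void __iomem *   */
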
 
index 4aa95f203f3ee773a6ab4bbdbb632efaf970785e..62dbee554f608c91fe7b2b3bf01f390260821c52 100644 (file)
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.in = __tmp->kfifo.out = 0; \
 })
 
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset_out(fifo)  \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.out = __tmp->kfifo.in; \
 })
 
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_len(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpl = (fifo); \
+       typeof((fifo) + 1) __tmpl = (fifo); \
        __tmpl->kfifo.in - __tmpl->kfifo.out; \
 })
 
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_empty(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        __tmpq->kfifo.in == __tmpq->kfifo.out; \
 })
 
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_full(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
 })
 
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
 #define        kfifo_avail(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        const size_t __recsize = sizeof(*__tmpq->rectype); \
        unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
        (__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_skip(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
 #define kfifo_peek_len(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
 #define kfifo_alloc(fifo, size, gfp_mask) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_free(fifo) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__is_kfifo_ptr(__tmp)) \
                __kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_init(fifo, buffer, size) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_put(fifo, val) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_get(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_peek(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_in(fifo, buf, n) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_from_user(fifo, from, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const void __user *__from = (from); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_to_user(fifo, to, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        void __user *__to = (to); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_in_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_out_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out_peek(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
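
Every kfifo.h hunk applies one rule: each expansion of a macro parameter is parenthesized, so operator precedence inside the caller's argument expression cannot regroup the expansion. The classic standalone demonstration of why (not kfifo-specific):

    #include <stdio.h>

    #define SQUARE_BAD(x)  (x * x)
    #define SQUARE_GOOD(x) ((x) * (x))

    int main(void)
    {
            /* SQUARE_BAD(1 + 2) expands to (1 + 2 * 1 + 2) == 5 */
            printf("%d\n", SQUARE_BAD(1 + 2));   /* 5 */
            printf("%d\n", SQUARE_GOOD(1 + 2));  /* 9 */
            return 0;
    }
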
index 74d691ee9121c5bb3aa8336d7eeffcc03d88cc46..3319a6967626e02f91c340080b21950a8310a67f 100644 (file)
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;
 
+struct page *ksm_does_need_to_copy(struct page *page,
+                       struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-                       struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
        struct anon_vma *anon_vma = page_anon_vma(page);
 
-       if (!anon_vma ||
-           (anon_vma->root == vma->anon_vma->root &&
-            page->index == linear_page_index(vma, address)))
-               return page;
-
-       return ksm_does_need_to_copy(page, vma, address);
+       return anon_vma &&
+               (anon_vma->root != vma->anon_vma->root ||
+                page->index != linear_page_index(vma, address));
 }
 
 int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
        return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
-       return page;
+       return 0;
 }
 
 static inline int page_referenced_ksm(struct page *page,
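
The ksm.h change splits the old page-returning helper in two: ksm_might_need_to_copy() becomes a cheap inline predicate, while the copy itself stays in ksm_does_need_to_copy(), whose declaration moves above the #ifdef so it is visible in all configurations. A plausible caller pattern implied by the new shape (an assumption; the actual call site is in the swapin path, outside this diff):

    if (ksm_might_need_to_copy(page, vma, address))
            page = ksm_does_need_to_copy(page, vma, address);
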
index b288cb713b902182cca71156e5d9f75c7452a117..f549056fb20bd5533555918cc1b1f9805c2cdcc3 100644 (file)
        int i;                                                          \
        preempt_disable();                                              \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock(void) {                                     \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
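
Switching the lglock iterators from online to possible CPUs guarantees that the lock and unlock passes walk the same, fixed set. A hypothetical interleaving the old code permitted (reconstructed from the change itself, not from a commit message):

    /*
     * CPU A: name_global_lock()    takes per-cpu locks for online {0, 1}
     *        ...CPU 2 comes online...
     * CPU A: name_global_unlock()  releases locks for online {0, 1, 2},
     *                              including one it never acquired
     */
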
index e6b1210772ceace3fc70817a32e1a411096a6b22..74949fbef8c608b9c5ef6dab3b508041a11243f3 100644 (file)
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len);
index 329a8faa6e37bb32bd6e65f1b8fd758ac45baa36..245cdacee5443791eb5418d10bde3daa190351a9 100644 (file)
@@ -38,6 +38,8 @@
  *      [8:0] Byte/block count
  */
 
+#define R4_MEMORY_PRESENT (1 << 27)
+
 /*
   SDIO status in R5
   Type
index 6e6e62648a4d4a6d792fe207d42105563b112aaa..3984c4eb41fdc9c85759dbffe308df5a806391f4 100644 (file)
@@ -283,6 +283,13 @@ struct zone {
        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];
 
+       /*
+        * When free pages are below this point, additional steps are taken
+        * when reading the number of free pages, to prevent per-cpu
+        * counter drift from allowing the watermarks to be breached.
+        */
+       unsigned long percpu_drift_mark;
+
        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
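
percpu_drift_mark and zone_nr_free_pages() work as a pair: below the mark, the cheap NR_FREE_PAGES counter may be off by the summed per-cpu deltas, so a slower but tighter estimate is taken instead. A plausible shape for the SMP implementation, using the zone_page_state_snapshot() helper added in the vmstat.h hunk further down (an assumption; the real body lives in mm/vmstat.c, outside this diff):

    unsigned long zone_nr_free_pages(struct zone *zone)
    {
            unsigned long nr = zone_page_state(zone, NR_FREE_PAGES);

            /* Near the watermarks, fold in the pending per-cpu deltas. */
            if (nr < zone->percpu_drift_mark)
                    nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);

            return nr;
    }
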
index 878cab4f5fcc5db95585184c22aea5d905a34295..f363bc8fdc74c821c99aa59d5bfcb9554c012c9a 100644 (file)
@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex) \
 do {                                                   \
        static struct lock_class_key __key;             \
index 7415839ac890f538b88611f7843b5b770e73292f..5310d27abd2a503ad523059ea4832f34ecfbb194 100644 (file)
@@ -26,6 +26,9 @@ struct semaphore {
        .wait_list      = LIST_HEAD_INIT((name).wait_list),             \
 }
 
+#define DEFINE_SEMAPHORE(name) \
+       struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
 #define DECLARE_MUTEX(name)    \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
 
index 2fee51a11b7399aea7ea7427ae32777b45cf8903..7cdd63366f883a164a7f5d5b74ff8882c62b4f8c 100644 (file)
@@ -19,6 +19,7 @@ struct bio;
 #define SWAP_FLAG_PREFER       0x8000  /* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK    0x7fff
 #define SWAP_FLAG_PRIO_SHIFT   0
+#define SWAP_FLAG_DISCARD      0x10000 /* discard swap cluster after use */
 
 static inline int current_is_kswapd(void)
 {
@@ -142,7 +143,7 @@ struct swap_extent {
 enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
-       SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
+       SWP_DISCARDABLE = (1 << 2),     /* swapon+blkdev support discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
 extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
 /* linux/mm/thrash.c */
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
index 7f43ccdc1d38c0eb919efe4891e1b91ec19c3705..eaaea37b3b75dd64b73a34a0e3beb31417bdd0d6 100644 (file)
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
        return x;
 }
 
+/*
+ * A more accurate version that also folds in the currently pending
+ * per-cpu deltas, which requires looping over all cpus. There is no
+ * synchronization, so the result is still only approximate.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+                                       enum zone_stat_item item)
+{
+       long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+       int cpu;
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
+}
+
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
index 192f88c5b0f9df29b80d51d4c5ceba5388d099eb..c9483d8f6140ed6cb4e06fa6139e2aeb907b3d47 100644 (file)
@@ -1791,19 +1791,20 @@ out:
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: the task whose cgroups @tsk will be attached to
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
        struct cgroupfs_root *root;
-       struct cgroup *cur_cg;
        int retval = 0;
 
        cgroup_lock();
        for_each_active_root(root) {
-               cur_cg = task_cgroup_from_root(current, root);
-               retval = cgroup_attach_task(cur_cg, tsk);
+               struct cgroup *from_cg = task_cgroup_from_root(from, root);
+
+               retval = cgroup_attach_task(from_cg, tsk);
                if (retval)
                        break;
        }
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
        return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
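
cgroup_attach_task_all() generalizes the old current-only helper, which survives as the one-line wrapper shown in the cgroup.h hunk earlier. In caller terms:

    /* before */
    cgroup_attach_task_current_cg(tsk);

    /* after (equivalent, via the wrapper) */
    cgroup_attach_task_all(current, tsk);
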
index 75bd9b3ebbb7cf501800115a531e86e7c30c5a28..20059ef4459a4ff293428337d9936ff438a8eb96 100644 (file)
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
        int i, bpno;
        kdb_bp_t *bp, *bp_check;
        int diag;
-       int free;
        char *symname = NULL;
        long offset = 0ul;
        int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
        /*
         * Find an empty bp structure to allocate
         */
-       free = KDB_MAXBPT;
        for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
                if (bp->bp_free)
                        break;
index ef3c3f88a7a35e36d8f1fb551c56e534cea52687..f83972b16564d00676154900f09d3c70affd773c 100644 (file)
  * @children: child nodes
  * @all: list head for list of all nodes
  * @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- *         copy of the profiling data here to allow collecting coverage data
- *         for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ *   files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ *   object files. Used only when gcov_persist=1.
  * @dentry: main debugfs entry, either a directory or data file
  * @links: associated symbolic links
  * @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
        struct list_head children;
        struct list_head all;
        struct gcov_node *parent;
-       struct gcov_info *info;
-       struct gcov_info *ghost;
+       struct gcov_info **loaded_info;
+       struct gcov_info *unloaded_info;
        struct dentry *dentry;
        struct dentry **links;
+       int num_loaded;
        char name[0];
 };
 
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
 };
 
 /*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
  */
 static struct gcov_info *get_node_info(struct gcov_node *node)
 {
-       if (node->info)
-               return node->info;
+       if (node->num_loaded > 0)
+               return node->loaded_info[0];
 
-       return node->ghost;
+       return node->unloaded_info;
+}
+
+/*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+{
+       struct gcov_info *info;
+       int i = 0;
+
+       if (node->unloaded_info)
+               info = gcov_info_dup(node->unloaded_info);
+       else
+               info = gcov_info_dup(node->loaded_info[i++]);
+       if (!info)
+               return NULL;
+       for (; i < node->num_loaded; i++)
+               gcov_info_add(info, node->loaded_info[i]);
+
+       return info;
 }
 
 /*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
        mutex_lock(&node_lock);
        /*
         * Read from a profiling data copy to minimize reference tracking
-        * complexity and concurrent access.
+        * complexity and concurrent access and to keep accumulating multiple
+        * profiling data sets associated with one node simple.
         */
-       info = gcov_info_dup(get_node_info(node));
+       info = get_accumulated_info(node);
        if (!info)
                goto out_unlock;
        iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
        return NULL;
 }
 
+/*
+ * Reset all profiling data associated with the specified node.
+ */
+static void reset_node(struct gcov_node *node)
+{
+       int i;
+
+       if (node->unloaded_info)
+               gcov_info_reset(node->unloaded_info);
+       for (i = 0; i < node->num_loaded; i++)
+               gcov_info_reset(node->loaded_info[i]);
+}
+
 static void remove_node(struct gcov_node *node);
 
 /*
  * write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debugfs node as well.
  */
 static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
                              size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
        node = get_node_by_name(info->filename);
        if (node) {
                /* Reset counts or remove node for unloaded modules. */
-               if (node->ghost)
+               if (node->num_loaded == 0)
                        remove_node(node);
                else
-                       gcov_info_reset(node->info);
+                       reset_node(node);
        }
        /* Reset counts for open file. */
        gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
        INIT_LIST_HEAD(&node->list);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->all);
-       node->info = info;
+       if (node->loaded_info) {
+               node->loaded_info[0] = info;
+               node->num_loaded = 1;
+       }
        node->parent = parent;
        if (name)
                strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        struct gcov_node *node;
 
        node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
-       if (!node) {
-               pr_warning("out of memory\n");
-               return NULL;
+       if (!node)
+               goto err_nomem;
+       if (info) {
+               node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+                                          GFP_KERNEL);
+               if (!node->loaded_info)
+                       goto err_nomem;
        }
        init_node(node, info, name, parent);
        /* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        list_add(&node->all, &all_head);
 
        return node;
+
+err_nomem:
+       kfree(node);
+       pr_warning("out of memory\n");
+       return NULL;
 }
 
 /* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
        list_del(&node->all);
        debugfs_remove(node->dentry);
        remove_links(node);
-       if (node->ghost)
-               gcov_info_free(node->ghost);
+       kfree(node->loaded_info);
+       if (node->unloaded_info)
+               gcov_info_free(node->unloaded_info);
        kfree(node);
 }
 
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
 
 /*
  * write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
  */
 static ssize_t reset_write(struct file *file, const char __user *addr,
                           size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
        mutex_lock(&node_lock);
 restart:
        list_for_each_entry(node, &all_head, all) {
-               if (node->info)
-                       gcov_info_reset(node->info);
+               if (node->num_loaded > 0)
+                       reset_node(node);
                else if (list_empty(&node->children)) {
                        remove_node(node);
                        /* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
 }
 
 /*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
  */
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
 {
-       node->ghost = gcov_info_dup(node->info);
-       if (!node->ghost) {
-               pr_warning("could not save data for '%s' (out of memory)\n",
-                          node->info->filename);
-               return -ENOMEM;
+       struct gcov_info **loaded_info;
+       int num = node->num_loaded;
+
+       /*
+        * Prepare new array. This is done first to simplify cleanup in
+        * case the new data set is incompatible, the node only contains
+        * unloaded data sets, or there's not enough memory for the array.
+        */
+       loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+       if (!loaded_info) {
+               pr_warning("could not add '%s' (out of memory)\n",
+                          info->filename);
+               return;
+       }
+       memcpy(loaded_info, node->loaded_info,
+              num * sizeof(struct gcov_info *));
+       loaded_info[num] = info;
+       /* Check if the new data set is compatible. */
+       if (num == 0) {
+               /*
+                * A module was unloaded, modified and reloaded. The new
+                * data set replaces the copy of the last one.
+                */
+               if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+                       pr_warning("discarding saved data for %s "
+                                  "(incompatible version)\n", info->filename);
+                       gcov_info_free(node->unloaded_info);
+                       node->unloaded_info = NULL;
+               }
+       } else {
+               /*
+                * Two different versions of the same object file are loaded.
+                * The initial one takes precedence.
+                */
+               if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+                       pr_warning("could not add '%s' (incompatible "
+                                  "version)\n", info->filename);
+                       kfree(loaded_info);
+                       return;
+               }
        }
-       node->info = NULL;
+       /* Overwrite previous array. */
+       kfree(node->loaded_info);
+       node->loaded_info = loaded_info;
+       node->num_loaded = num + 1;
+}
 
-       return 0;
+/*
+ * Return the index of a profiling data set associated with a node.
+ */
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       for (i = 0; i < node->num_loaded; i++) {
+               if (node->loaded_info[i] == info)
+                       return i;
+       }
+       return -ENOENT;
 }
 
 /*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Save the data of a profiling data set which is being unloaded.
  */
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static void save_info(struct gcov_node *node, struct gcov_info *info)
 {
-       if (gcov_info_is_compatible(node->ghost, info))
-               gcov_info_add(info, node->ghost);
+       if (node->unloaded_info)
+               gcov_info_add(node->unloaded_info, info);
        else {
-               pr_warning("discarding saved data for '%s' (version changed)\n",
+               node->unloaded_info = gcov_info_dup(info);
+               if (!node->unloaded_info) {
+                       pr_warning("could not save data for '%s' "
+                                  "(out of memory)\n", info->filename);
+               }
+       }
+}
+
+/*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       i = get_info_index(node, info);
+       if (i < 0) {
+               pr_warning("could not remove '%s' (not found)\n",
                           info->filename);
+               return;
        }
-       gcov_info_free(node->ghost);
-       node->ghost = NULL;
-       node->info = info;
+       if (gcov_persist)
+               save_info(node, info);
+       /* Shrink array. */
+       node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+       node->num_loaded--;
+       if (node->num_loaded > 0)
+               return;
+       /* Last loaded data set was removed. */
+       kfree(node->loaded_info);
+       node->loaded_info = NULL;
+       node->num_loaded = 0;
+       if (!node->unloaded_info)
+               remove_node(node);
 }
 
 /*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
        node = get_node_by_name(info->filename);
        switch (action) {
        case GCOV_ADD:
-               /* Add new node or revive ghost. */
-               if (!node) {
+               if (node)
+                       add_info(node, info);
+               else
                        add_node(info);
-                       break;
-               }
-               if (gcov_persist)
-                       revive_node(node, info);
-               else {
-                       pr_warning("could not add '%s' (already exists)\n",
-                                  info->filename);
-               }
                break;
        case GCOV_REMOVE:
-               /* Remove node or turn into ghost. */
-               if (!node) {
+               if (node)
+                       remove_info(node, info);
+               else {
                        pr_warning("could not remove '%s' (not found)\n",
                                   info->filename);
-                       break;
                }
-               if (gcov_persist) {
-                       if (!ghost_node(node))
-                               break;
-               }
-               remove_node(node);
                break;
        }
        mutex_unlock(&node_lock);
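
The remove_info() hunk above shrinks loaded_info[] with the classic unordered swap-with-last idiom; a minimal standalone sketch of the same pattern (hypothetical name, element order is not preserved):

    /* O(1) removal once the index is known: move the tail into the hole. */
    static void array_remove_unordered(struct gcov_info **items, int *num, int i)
    {
            items[i] = items[*num - 1];
            (*num)--;
    }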
index 53b1916c94926c245aacd17ccb420fc00d06a19b..253dc0f35cf4c30786d1ff3565427d1374762a04 100644 (file)
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
        right = group_info->ngroups;
        while (left < right) {
                unsigned int mid = (left+right)/2;
-               int cmp = grp - GROUP_AT(group_info, mid);
-               if (cmp > 0)
+               if (grp > GROUP_AT(group_info, mid))
                        left = mid + 1;
-               else if (cmp < 0)
+               else if (grp < GROUP_AT(group_info, mid))
                        right = mid;
                else
                        return 1;
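
The groups_search() change drops the subtraction-based comparison because gid_t is unsigned: the difference wraps for widely separated gids and its sign is then meaningless. A small userspace demonstration with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int grp = 0x80000001u, mid = 1u; /* gid_t is unsigned */
            int cmp = grp - mid;  /* wraps to a negative int */

            printf("cmp=%d, but grp > mid is %d\n", cmp, grp > mid);
            return 0;
    }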
index ce669174f355c7dd1e1893903bb4d18a25f17c34..1decafbb6b1a28197b768021cc987e230d352b5b 100644 (file)
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-       struct hrtimer_clock_base *base;
        unsigned long flags;
        ktime_t rem;
 
-       base = lock_hrtimer_base(timer, &flags);
+       lock_hrtimer_base(timer, &flags);
        rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);
 
index 4c0b7b3e6d2e9a483c6cb4cc384e979911ed03bb..200407c1502f509ee3f9d8a665bc4d3b78a27f74 100644 (file)
 # include <asm/mutex.h>
 #endif
 
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
 static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
  *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
  *
  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
        return prev == 1;
 }
 
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
  *
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
  * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
  *
  * This function must not be used in interrupt context. The
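
These hunks fix the doc-comment opener because the kernel-doc tooling recognizes only comments that begin with exactly "/**"; the expected shape, shown on a hypothetical helper:

    /**
     * my_lock_helper - one-line summary of the function
     * @lock: the mutex being operated on
     *
     * Free-form description. An opener with three asterisks is
     * skipped by scripts/kernel-doc.
     */
    static void my_lock_helper(struct mutex *lock)
    {
            mutex_lock(lock);
    }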
index 403d1804b198140e4f1355c70c0b25e6efa9e5d8..657555a5f30fc9399513e9538a205d2e8bf0fff0 100644 (file)
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
        }
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+       return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
+       u64 delta;
+       /*
+        * An event which could not be activated because of
+        * filter mismatch still needs to have its timings
+        * maintained, otherwise bogus information is returned
+        * via read() for time_enabled and time_running:
+        */
+       if (event->state == PERF_EVENT_STATE_INACTIVE
+           && !event_filter_match(event)) {
+               delta = ctx->time - event->tstamp_stopped;
+               event->tstamp_running += delta;
+               event->tstamp_stopped = ctx->time;
+       }
+
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return;
 
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
                struct perf_event_context *ctx)
 {
        struct perf_event *event;
-
-       if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-               return;
+       int state = group_event->state;
 
        event_sched_out(group_event, cpuctx, ctx);
 
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
        list_for_each_entry(event, &group_event->sibling_list, group_entry)
                event_sched_out(event, cpuctx, ctx);
 
-       if (group_event->attr.exclusive)
+       if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                cpuctx->exclusive = 0;
 }
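
The event_sched_out() hunk keeps time accounting sane for events left INACTIVE by a CPU-filter mismatch; the bookkeeping in isolation (a sketch with a hypothetical struct, not the real perf_event):

    struct demo_event {
            u64 tstamp_running;   /* time credited as "running" */
            u64 tstamp_stopped;   /* context time when last stopped */
    };

    static void account_filtered(struct demo_event *e, u64 ctx_time)
    {
            u64 delta = ctx_time - e->tstamp_stopped;

            e->tstamp_running += delta;   /* read() now reports sane values */
            e->tstamp_stopped = ctx_time;
    }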
 
index c77963938bca440a90423952d6d85cf4d66abd83..8dc31e02ae129e8f042804b67c38ab02f997d94c 100644 (file)
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
                goto Close;
 
        suspend_console();
-       hibernation_freeze_swap();
        saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
index 5e7edfb05e66cff0d2c99d5fc8fddfde03e372c3..f6cd6faf84fdb516323e4257f53a61778c7fd60b 100644 (file)
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
-       hibernation_thaw_swap();
 }
 
 /* Helper functions used for the shrinking of memory. */
index 5d0059eed3e4e3ce0bc38ad072bd7b1430d9f712..e6a5bdf61a375c309c1f9ea356e9123d79699037 100644 (file)
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
 {
        unsigned long offset;
 
-       offset = swp_offset(get_swap_for_hibernation(swap));
+       offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
                ext = container_of(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
 
                kfree(ext);
        }
index ab661ebc4895a8471ecc808825477cf0c3558444..134f7edb30c6ce4dc45548a25f75cb13f67a269d 100644 (file)
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                  int this_cpu, int load_idx)
 {
-       struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+       struct sched_group *idlest = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                if (local_group) {
                        this_load = avg_load;
-                       this = group;
                } else if (avg_load < min_load) {
                        min_load = avg_load;
                        idlest = group;
index e9ad4448982860af9919df53c3368156a4bf2445..7f5a0cd296a96ca44e43f0db028026094dbbb57a 100644 (file)
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;
+       rcu_read_lock();
 
        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        return err;
 }
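
The rcu_read_lock()/rcu_read_unlock() pair added here most likely covers the pid-to-task lookups performed in the elided middle of the function (e.g. find_task_by_vpid()), which require an RCU read-side critical section independent of tasklist_lock.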
 
index ca38e8e3e907557f74faaad7ddb57d330bd43d2d..f88552c6d2275be1216187f07b1e0e1b22b93af2 100644 (file)
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
 {
        sysctl_set_parent(NULL, root_table);
 #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
-       {
-               int err;
-               err = sysctl_check_table(current->nsproxy, root_table);
-       }
+       sysctl_check_table(current->nsproxy, root_table);
 #endif
        return 0;
 }
index 0d88ce9b9fb8828c9a81fdffcd47763ae5cc2543..7cb1f45a1de1ccf61e9cfcf59e0abe0642f12cf2 100644 (file)
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
+       int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       static DEFINE_MUTEX(mutex);
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
 #endif
+       mutex_lock(&ftrace_profile_lock);
+
+       /* we raced with function_profile_reset() */
+       if (unlikely(rec->counter == 0)) {
+               ret = -EBUSY;
+               goto out;
+       }
 
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
                do_div(stddev, (rec->counter - 1) * 1000);
        }
 
-       mutex_lock(&mutex);
        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
-       mutex_unlock(&mutex);
 #endif
        seq_putc(m, '\n');
+out:
+       mutex_unlock(&ftrace_profile_lock);
 
-       return 0;
+       return ret;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
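
function_stat_show() now revalidates rec->counter under ftrace_profile_lock so it cannot race with ftrace_profile_reset() and compute avg/stddev from a freshly zeroed counter; the check-under-lock pattern in isolation (hypothetical names):

    static DEFINE_MUTEX(demo_lock);  /* stands in for ftrace_profile_lock */

    static int demo_show(struct ftrace_profile *rec)
    {
            int ret = 0;

            mutex_lock(&demo_lock);
            if (unlikely(rec->counter == 0)) {  /* raced with a reset */
                    ret = -EBUSY;
                    goto out;
            }
            /* counter > 0 is now stable: averaging is safe here */
    out:
            mutex_unlock(&demo_lock);
            return ret;
    }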
index 19cccc3c302871beae5fd39ad937b0791a2e785d..492197e2f86cda2792603186b59ad3fdd17c448d 100644 (file)
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
 {
-       struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        unsigned length;
 
        cpu_buffer = iter->cpu_buffer;
-       buffer = cpu_buffer->buffer;
 
        /*
         * Check if we are at the end of the buffer.
index 0d53c8e853b12450cf0c74665d13a22e91a47543..7f9c3c52ecc12ef5d0de1728839218ff87c2dc7b 100644 (file)
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
 
 void touch_softlockup_watchdog(void)
 {
-       __get_cpu_var(watchdog_touch_ts) = 0;
+       __raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-       __get_cpu_var(watchdog_nmi_touch) = true;
+       if (watchdog_enabled) {
+               unsigned cpu;
+
+               for_each_present_cpu(cpu) {
+                       if (per_cpu(watchdog_nmi_touch, cpu) != true)
+                               per_cpu(watchdog_nmi_touch, cpu) = true;
+               }
+       }
        touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu)
                wake_up_process(p);
        }
 
+       /* if any cpu succeeds, watchdog is considered enabled for the system */
+       watchdog_enabled = 1;
+
        return 0;
 }
 
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu)
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
-
-       /* if any cpu succeeds, watchdog is considered enabled for the system */
-       watchdog_enabled = 1;
 }
 
 static void watchdog_enable_all_cpus(void)
index f4e516e9c37cc4c62f93faf92131a632098d2ca5..f0fb9124e410c436c0f240d69f089f4935af2e92 100644 (file)
@@ -189,7 +189,7 @@ config COMPACTION
 config MIGRATION
        bool "Page migration"
        def_bool y
-       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
+       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
        help
          Allows the migration of the physical location of pages of processes
          while the virtual addresses are not changed. This is useful in
index 13b6dad1eed272bec61a388d17f116be62cb1bb5..1481de68184bce6d8fae978d3b6be8e3223319a5 100644 (file)
@@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                 */
                vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
-               flush_dcache_page(tovec->bv_page);
                bounce_copy_vec(tovec, vfrom);
+               flush_dcache_page(tovec->bv_page);
        }
 }
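
The bounce-buffer hunk moves the dcache flush after the copy: on architectures with aliasing caches the flush must follow the write, or user mappings can observe stale data. The ordering in isolation (sketch):

    static void copy_then_flush(struct page *page, const void *src, size_t len)
    {
            memcpy(page_address(page), src, len);
            flush_dcache_page(page);  /* after the write, never before */
    }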
 
index 94cce51b0b3535af75c20f29ecb86a11aba32a71..4d709ee5901370842534224a9f81e7d13943e196 100644 (file)
@@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
 /* Similar to reclaim, but different enough that they don't share logic */
 static bool too_many_isolated(struct zone *zone)
 {
-
-       unsigned long inactive, isolated;
+       unsigned long active, inactive, isolated;
 
        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                        zone_page_state(zone, NR_INACTIVE_ANON);
+       active = zone_page_state(zone, NR_ACTIVE_FILE) +
+                                       zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                        zone_page_state(zone, NR_ISOLATED_ANON);
 
-       return isolated > inactive;
+       return isolated > (inactive + active) / 2;
 }
 
 /*
index e2ae00458320786a380a1ab370efe0dc6cfd6e1a..b1873cf03ed986bcb062259da8f1c4093a97160b 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 {
        struct page *new_page;
 
-       unlock_page(page);      /* any racers will COW it, not modify it */
-
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
                copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
                        add_page_to_unevictable_list(new_page);
        }
 
-       page_cache_release(page);
        return new_page;
 }
 
index 6b2ab10518512052c895dd5db7ff0f20fd1df2f3..71b161b73bb503be50556e9ff302ca0c7eaba396 100644 (file)
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned int flags, pte_t orig_pte)
 {
        spinlock_t *ptl;
-       struct page *page;
+       struct page *page, *swapcache = NULL;
        swp_entry_t entry;
        pte_t pte;
        struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       page = ksm_might_need_to_copy(page, vma, address);
-       if (!page) {
-               ret = VM_FAULT_OOM;
-               goto out;
+       /*
+        * Make sure try_to_free_swap didn't release the swapcache
+        * from under us. The page pin isn't enough to prevent that.
+        */
+       if (unlikely(!PageSwapCache(page)))
+               goto out_page;
+
+       if (ksm_might_need_to_copy(page, vma, address)) {
+               swapcache = page;
+               page = ksm_does_need_to_copy(page, vma, address);
+
+               if (unlikely(!page)) {
+                       ret = VM_FAULT_OOM;
+                       page = swapcache;
+                       swapcache = NULL;
+                       goto out_page;
+               }
        }
 
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2748,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                try_to_free_swap(page);
        unlock_page(page);
+       if (swapcache) {
+               /*
+                * Hold the lock so the swap entry cannot be reused
+                * until we take the PT lock for the pte_same() check
+                * (to avoid false positives from pte_same). For
+                * further safety, release the lock after the swap_free
+                * so that the swap count won't change under a
+                * parallel locked swapcache.
+                */
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
 
        if (flags & FAULT_FLAG_WRITE) {
                ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2781,10 @@ out_page:
        unlock_page(page);
 out_release:
        page_cache_release(page);
+       if (swapcache) {
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
        return ret;
 }
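
Net effect of the do_swap_page() hunks: when KSM must give the faulting task a private copy, the original swapcache page is stashed in swapcache and stays locked through pte setup and swap_free(), and is only unlocked and released afterwards, so the swap entry cannot be recycled before the pte_same() recheck.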
 
index a4cfcdc00455de4be15fcec98c76e45f8de5feab..dd186c1a5d53f9ebd27de4c1bedcaac471d6c93e 100644 (file)
@@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page)
 /* Return the start of the next active pageblock after a given page */
 static struct page *next_active_pageblock(struct page *page)
 {
-       int pageblocks_stride;
-
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
 
-       /* Move forward by at least 1 * pageblock_nr_pages */
-       pageblocks_stride = 1;
-
        /* If the entire pageblock is free, move to the end of free page */
-       if (pageblock_free(page))
-               pageblocks_stride += page_order(page) - pageblock_order;
+       if (pageblock_free(page)) {
+               int order;
+               /* Be careful: we hold no locks, so page_order() can change under us. */
+               order = page_order(page);
+               if ((order < MAX_ORDER) && (order >= pageblock_order))
+                       return page + (1 << order);
+       }
 
-       return page + (pageblocks_stride * pageblock_nr_pages);
+       return page + pageblock_nr_pages;
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
index cbae7c5b95680a1bfca1df7e11a215bfce15b57c..b70919ce4f72e6941f67b1a5462f5f270c231536 100644 (file)
@@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page)
        }
 }
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
        return (vma->vm_flags & VM_GROWSDOWN) &&
index f5b7d1760213e53db3c46e84dde56daf219ea0cd..e35bfb82c8555b7377334dbea42bfcf588b0bab8 100644 (file)
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn,
        return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+#ifdef CONFIG_SMP
+/* Called when a more accurate view of NR_FREE_PAGES is needed */
+unsigned long zone_nr_free_pages(struct zone *zone)
+{
+       unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
+       /*
+        * While kswapd is awake, the zone is considered to be under some
+        * memory pressure. Under pressure, there is a risk that
+        * per-cpu-counter-drift will allow the min watermark to be breached
+        * potentially causing a live-lock. While kswapd is awake and
+        * free pages are low, get a better estimate for free pages
+        */
+       if (nr_free_pages < zone->percpu_drift_mark &&
+                       !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+               return zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+       return nr_free_pages;
+}
+#endif /* CONFIG_SMP */
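
zone_page_state_snapshot(), the accurate-but-slower read this helper falls back to, folds the per-cpu deltas into the global counter; roughly (a sketch of the idea, not the exact kernel helper):

    static unsigned long state_snapshot(struct zone *zone, enum zone_stat_item item)
    {
            long x = atomic_long_read(&zone->vm_stat[item]);
            int cpu;

            for_each_online_cpu(cpu)
                    x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

            return x < 0 ? 0 : x;
    }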
index a9649f4b261e6b3c01632939c46a77f19f447de1..a8cfa9cc6e86e5d6912a39bc3f5c9d18fb97b7cf 100644 (file)
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
+       int to_free = count;
 
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-       while (count) {
+       while (to_free) {
                struct page *page;
                struct list_head *list;
 
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
-               } while (--count && --batch_free && !list_empty(list));
+               } while (--to_free && --batch_free && !list_empty(list));
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
 }
 
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        __free_one_page(page, zone, order, migratetype);
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
 }
 
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
        /* free_pages may go negative - that's OK */
        long min = mark;
-       long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+       long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        struct reclaim_state reclaim_state;
        struct task_struct *p = current;
+       bool drained = false;
 
        cond_resched();
 
@@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        cond_resched();
 
-       if (order != 0)
-               drain_all_pages();
+       if (unlikely(!(*did_some_progress)))
+               return NULL;
 
-       if (likely(*did_some_progress))
-               page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+       page = get_page_from_freelist(gfp_mask, nodemask, order,
                                        zonelist, high_zoneidx,
                                        alloc_flags, preferred_zone,
                                        migratetype);
+
+       /*
+        * If an allocation failed after direct reclaim, it could be because
+        * pages are pinned on the per-cpu lists. Drain them and try again
+        */
+       if (!page && !drained) {
+               drain_all_pages();
+               drained = true;
+               goto retry;
+       }
+
        return page;
 }
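
The direct-reclaim hunk retries the freelist exactly once after draining the per-cpu lists; the retry-once idiom in isolation (the fast-path helper name is hypothetical):

    static struct page *alloc_with_one_drain(gfp_t gfp, unsigned int order)
    {
            struct page *page;
            bool drained = false;

    retry:
            page = try_freelist_alloc(gfp, order);  /* hypothetical fast path */
            if (!page && !drained) {
                    drain_all_pages();  /* return pages pinned on per-cpu lists */
                    drained = true;
                    goto retry;
            }
            return page;
    }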
 
@@ -2423,7 +2436,7 @@ void show_free_areas(void)
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
-                       K(zone_page_state(zone, NR_FREE_PAGES)),
+                       K(zone_nr_free_pages(zone)),
                        K(min_wmark_pages(zone)),
                        K(low_wmark_pages(zone)),
                        K(high_wmark_pages(zone)),
index 1f3f9c59a73ab5be4ff4bb37f428364df7544706..7c703ff2f36f0b760b79eb36149084f07621a0a1 100644 (file)
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
        if (nr_blocks) {
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        return err;
                cond_resched();
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        break;
 
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
                        start_block <<= PAGE_SHIFT - 9;
                        nr_blocks <<= PAGE_SHIFT - 9;
                        if (blkdev_issue_discard(si->bdev, start_block,
-                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
-                                                       BLKDEV_IFL_BARRIER))
+                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
                                break;
                }
 
@@ -320,10 +315,8 @@ checks:
        if (offset > si->highest_bit)
                scan_base = offset = si->lowest_bit;
 
-       /* reuse swap entry of cache-only swap if not hibernation. */
-       if (vm_swap_full()
-               && usage == SWAP_HAS_CACHE
-               && si->swap_map[offset] == SWAP_HAS_CACHE) {
+       /* reuse swap entry of cache-only swap if not busy. */
+       if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
                int swap_was_freed;
                spin_unlock(&swap_lock);
                swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void)
        spin_lock(&swap_lock);
        if (nr_swap_pages <= 0)
                goto noswap;
-       if (swap_for_hibernation)
-               goto noswap;
        nr_swap_pages--;
 
        for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@ noswap:
        return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now the suspend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+       struct swap_info_struct *si;
+       pgoff_t offset;
+
+       spin_lock(&swap_lock);
+       si = swap_info[type];
+       if (si && (si->flags & SWP_WRITEOK)) {
+               nr_swap_pages--;
+               /* This is called for allocating swap entry, not cache */
+               offset = scan_swap_map(si, 1);
+               if (offset) {
+                       spin_unlock(&swap_lock);
+                       return swp_entry(type, offset);
+               }
+               nr_swap_pages++;
+       }
+       spin_unlock(&swap_lock);
+       return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
        struct swap_info_struct *p;
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page)
        if (page_swapcount(page))
                return 0;
 
+       /*
+        * Once hibernation has begun to create its image of memory,
+        * there's a danger that one of the calls to try_to_free_swap()
+        * - most probably a call from __try_to_reclaim_swap() while
+        * hibernation is allocating its own swap pages for the image,
+        * but conceivably even a call from memory reclaim - will free
+        * the swap from a page which has already been recorded in the
+        * image as a clean swapcache page, and then reuse its swap for
+        * another page of the image.  On waking from hibernation, the
+        * original page might be freed under memory pressure, then
+        * later read back in from swap, now with the wrong data.
+        *
+        * Hibernation clears bits from gfp_allowed_mask to prevent
+        * memory reclaim from writing to disk, so check that here.
+        */
+       if (!(gfp_allowed_mask & __GFP_IO))
+               return 0;
+
        delete_from_swap_cache(page);
        SetPageDirty(page);
        return 1;
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-       int i;
-
-       spin_lock(&swap_lock);
-
-       printk(KERN_INFO "PM: Freeze Swap\n");
-       swap_for_hibernation = true;
-       for (i = 0; i < MAX_SWAPFILES; i++)
-               hibernation_offset[i] = 1;
-       spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-       spin_lock(&swap_lock);
-       if (swap_for_hibernation) {
-               printk(KERN_INFO "PM: Thaw Swap\n");
-               swap_for_hibernation = false;
-       }
-       spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-       pgoff_t off;
-       swp_entry_t val = {0};
-       struct swap_info_struct *si;
-
-       spin_lock(&swap_lock);
-
-       si = swap_info[type];
-       if (!si || !(si->flags & SWP_WRITEOK))
-               goto done;
-
-       for (off = hibernation_offset[type]; off < si->max; ++off) {
-               if (!si->swap_map[off])
-                       break;
-       }
-       if (off < si->max) {
-               val = swp_entry(type, off);
-               hibernation_offset[type] = off + 1;
-       }
-done:
-       spin_unlock(&swap_lock);
-       return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-       /* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                        p->flags |= SWP_SOLIDSTATE;
                        p->cluster_next = 1 + (random32() % p->highest_bit);
                }
-               if (discard_swap(p) == 0)
+               if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
                        p->flags |= SWP_DISCARDABLE;
        }
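
With the swapon() hunk above, a device is marked SWP_DISCARDABLE only when userspace opts in via SWAP_FLAG_DISCARD; a minimal userspace sketch:

    #include <sys/swap.h>

    #ifndef SWAP_FLAG_DISCARD
    #define SWAP_FLAG_DISCARD 0x10000  /* value from <linux/swap.h> */
    #endif

    int enable_swap_with_discard(const char *dev)
    {
            return swapon(dev, SWAP_FLAG_DISCARD);  /* opt in explicitly */
    }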
 
index f389168f9a837b9c6be4e1f9bb3d0892396315de..355a9e669aaa800d62fa31d2b83110bf76cce9d7 100644 (file)
@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void)
        int threshold;
 
        for_each_populated_zone(zone) {
+               unsigned long max_drift, tolerate_drift;
+
                threshold = calculate_threshold(zone);
 
                for_each_online_cpu(cpu)
                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold
                                                        = threshold;
+
+               /*
+                * Only set percpu_drift_mark if there is a danger that
+                * NR_FREE_PAGES reports that the low watermark is still met
+                * when in fact the min watermark could be breached by an allocation
+                */
+               tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
+               max_drift = num_online_cpus() * threshold;
+               if (max_drift > tolerate_drift)
+                       zone->percpu_drift_mark = high_wmark_pages(zone) +
+                                       max_drift;
        }
 }
 
@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
-                  zone_page_state(zone, NR_FREE_PAGES),
+                  zone_nr_free_pages(zone),
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
@@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
+               refresh_zone_stat_thresholds();
                start_cpu_timer(cpu);
                node_set_state(cpu_to_node(cpu), N_CPU);
                break;
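
Worked example of the drift mark: with 16 online CPUs and a per-cpu stat_threshold of 32, NR_FREE_PAGES can be off by up to 16 * 32 = 512 pages; if the low-to-min watermark gap is smaller than that, percpu_drift_mark is set to high_wmark + 512, and free-page reads below it take the accurate snapshot path instead.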
index 3c88be94649408b412c0193f0bf4186273c6a972..02baec732bb512c77fed7d5ede247b72d788a147 100644 (file)
@@ -33,8 +33,8 @@ struct aa_rlimit {
 };
 
 int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim);
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+                     unsigned int resource, struct rlimit *new_rlim);
 
 void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
 
index 6e85cdb4303f69fc1911c94aefd5f1c5b695239c..506d2baf614797624fc4b9450c0d12c9f56e8ae4 100644 (file)
@@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
        *ns_name = NULL;
        if (name[0] == ':') {
                char *split = strchr(&name[1], ':');
+               *ns_name = skip_spaces(&name[1]);
                if (split) {
                        /* overwrite ':' with \0 */
                        *split = 0;
@@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name)
                } else
                        /* a ns name without a following profile is allowed */
                        name = NULL;
-               *ns_name = &name[1];
        }
        if (name && *name == 0)
                name = NULL;
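
For example, ":ns:profile" should now yield ns_name "ns" (leading spaces skipped) and the profile name "profile". Capturing *ns_name before the if/else matters because both branches may reassign or NULL out name, after which the old trailing *ns_name = &name[1] pointed at the wrong string (or at NULL + 1 for a bare ":ns").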
index f73e2c2042185fff2d079dbdb3f4b8b828371e72..cf1de4462ccd3fb297f48bf351dd3494804f22c1 100644 (file)
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
        int error = 0;
 
        if (!unconfined(profile))
-               error = aa_task_setrlimit(profile, resource, new_rlim);
+               error = aa_task_setrlimit(profile, task, resource, new_rlim);
 
        return error;
 }
index 19358dc14605bae1422ae00226291751695ba44c..82396050f18646ac0519352321637e930c05e367 100644 (file)
@@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
 {
        struct path root, tmp;
        char *res;
-       int deleted, connected;
-       int error = 0;
+       int connected, error = 0;
 
        /* Get the root we want to resolve too, released below */
        if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
        spin_lock(&dcache_lock);
-       /* There is a race window between path lookup here and the
-        * need to strip the " (deleted) string that __d_path applies
-        * Detect the race and relookup the path
-        *
-        * The stripping of (deleted) is a hack that could be removed
-        * with an updated __d_path
-        */
-       do {
-               tmp = root;
-               deleted = d_unlinked(path->dentry);
-               res = __d_path(path, &tmp, buf, buflen);
-
-       } while (deleted != d_unlinked(path->dentry));
+       tmp = root;
+       res = __d_path(path, &tmp, buf, buflen);
        spin_unlock(&dcache_lock);
 
        *name = res;
@@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
-       if (deleted) {
-               /* On some filesystems, newly allocated dentries appear to the
-                * security_path hooks as a deleted dentry except without an
-                * inode allocated.
-                *
-                * Remove the appended deleted text and return as string for
-                * normal mediation, or auditing.  The (deleted) string is
-                * guaranteed to be added in this case, so just strip it.
-                */
-               buf[buflen - 11] = 0;   /* - (len(" (deleted)") +\0) */
 
-               if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) {
+       /* Handle two cases:
+        * 1. A deleted dentry && profile is not allowing mediation of deleted
+        * 2. On some filesystems, newly allocated dentries appear to the
+        *    security_path hooks as a deleted dentry except without an inode
+        *    allocated.
+        */
+       if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+           !(flags & PATH_MEDIATE_DELETED)) {
                        error = -ENOENT;
                        goto out;
-               }
        }
 
        /* Determine if the path is connected to the expected root */
index 3cdc1ad0787ec9c4769455f8aeb004a417d246bf..52cc865f1464574e696fd28eca6a6e0eed326d68 100644 (file)
@@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                /* released below */
                ns = aa_get_namespace(root);
 
-       write_lock(&ns->lock);
        if (!name) {
                /* remove namespace - can only happen if fqname[0] == ':' */
+               write_lock(&ns->parent->lock);
                __remove_namespace(ns);
+               write_unlock(&ns->parent->lock);
        } else {
                /* remove profile */
+               write_lock(&ns->lock);
                profile = aa_get_profile(__lookup_profile(&ns->base, name));
                if (!profile) {
                        error = -ENOENT;
@@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                }
                name = profile->base.hname;
                __remove_profile(profile);
+               write_unlock(&ns->lock);
        }
-       write_unlock(&ns->lock);
 
        /* don't fail removal if audit fails */
        (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
index 4a368f1fd36ddf02af7204d30ee1b136f1d57bf7..a4136c10b1c6292edbdadae7285803583fb74241 100644 (file)
@@ -72,6 +72,7 @@ int aa_map_resource(int resource)
 /**
  * aa_task_setrlimit - test permission to set an rlimit
  * @profile - profile confining the task  (NOT NULL)
+ * @task - task the resource is being set on
  * @resource - the resource being set
  * @new_rlim - the new resource limit  (NOT NULL)
  *
@@ -79,18 +80,21 @@ int aa_map_resource(int resource)
  *
  * Returns: 0 or error code if setting resource failed
  */
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim)
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+                     unsigned int resource, struct rlimit *new_rlim)
 {
        int error = 0;
 
-       if (profile->rlimits.mask & (1 << resource) &&
-           new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)
-
-               error = audit_resource(profile, resource, new_rlim->rlim_max,
-                       -EACCES);
+       /* TODO: extend resource control to handle other (non current)
+        * processes.  AppArmor rules currently have the implicit assumption
+        * that the task is setting the resource of the current process
+        */
+       if ((task != current->group_leader) ||
+           (profile->rlimits.mask & (1 << resource) &&
+            new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
+               error = -EACCES;
 
-       return error;
+       return audit_resource(profile, resource, new_rlim->rlim_max, error);
 }
 
 /**
index 16d100d3fc38de931de8e1a2679bc025359f3d31..3fbcd1dda0ef6e06da4a5b4b9c23a240a6378a35 100644 (file)
@@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
 
 /* set during initialization */
+extern int iint_initialized;
 extern int ima_initialized;
 extern int ima_used_chip;
 extern char *ima_hash;
index 7625b85c2274f457fc0d260a21e2d9039758d12c..afba4aef812f699134f7c7bc66c32251d2f12c69 100644 (file)
 
 RADIX_TREE(ima_iint_store, GFP_ATOMIC);
 DEFINE_SPINLOCK(ima_iint_lock);
-
 static struct kmem_cache *iint_cache __read_mostly;
 
+int iint_initialized = 0;
+
 /* ima_iint_find_get - return the iint associated with an inode
  *
  * ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void)
        iint_cache =
            kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
                              SLAB_PANIC, init_once);
+       iint_initialized = 1;
        return 0;
 }
 security_initcall(ima_iintcache_init);
index f93641382e9f9483576578a3ba5f41286f4cc3ab..e662b89d407944103dc121b9ccb37f7e68ac62e1 100644 (file)
@@ -148,12 +148,14 @@ void ima_counts_get(struct file *file)
        struct ima_iint_cache *iint;
        int rc;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
                return;
        mutex_lock(&iint->mutex);
+       if (!ima_initialized)
+               goto out;
        rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
        if (rc < 0)
                goto out;
@@ -213,7 +215,7 @@ void ima_file_free(struct file *file)
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
@@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
-       int rc;
+       int rc = 0;
 
        if (!ima_initialized || !S_ISREG(inode->i_mode))
                return 0;
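
The new iint_initialized flag separates the two init stages: the iint cache is created at security_initcall time while full IMA initialization happens later, so files opened in the window between the two can still have their open counts tracked safely before measurement becomes possible.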
index 624a96c636fdbc36a472ecaadf5fcb72c226bf38..6de4313924fb5c510f354cb3a8e2a73061e103f6 100644 (file)
@@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node)
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->val);
 
+       node->children_hit = 0;
        node->parent = NULL;
        node->hit = 0;
 }