Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied...
author		Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 21 Jul 2010 01:29:25 +0000 (18:29 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 21 Jul 2010 01:29:25 +0000 (18:29 -0700)
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/r600: fix possible NULL pointer dereference
  drm/radeon/kms: add quirk for ASUS HD 3600 board
  include/linux/vgaarb.h: add missing part of include guard
  drm/nouveau: Fix crashes during fbcon init on single head cards.
  drm/nouveau: fix pcirom vbios shadow breakage from acpi rom patch
  drm/radeon/kms: fix shared ddc harder
  drm/i915: enable low power render writes on GEN3 hardware.
  drm/i915: Define MI_ARB_STATE bits
  vmwgfx: return -EFAULT if copy_to_user fails
  fb: handle allocation failure in alloc_apertures()
  drm: radeon: check kzalloc() result
  drm/ttm: Fix build on architectures without AGP
  drm/radeon/kms: fix gtt MC base alignment on rs4xx/rs690/rs740 asics
  drm/radeon/kms: fix possible mis-detection of sideport on rs690/rs740
  drm/radeon/kms: fix legacy tv-out pal mode

202 files changed:
Documentation/00-INDEX
Documentation/bus-virt-phys-mapping.txt [moved from Documentation/IO-mapping.txt with 100% similarity]
Documentation/kernel-parameters.txt
arch/arm/include/asm/atomic.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/kprobes-decode.c
arch/arm/kernel/process.c
arch/arm/mach-ux500/clock.c
arch/arm/mach-ux500/cpu-db5500.c
arch/arm/mm/cache-l2x0.c
arch/arm/tools/mach-types
arch/microblaze/Kconfig
arch/microblaze/include/asm/memblock.h [moved from arch/microblaze/include/asm/lmb.h with 57% similarity]
arch/microblaze/kernel/prom.c
arch/microblaze/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/abs_addr.h
arch/powerpc/include/asm/cpm.h
arch/powerpc/include/asm/cpm1.h
arch/powerpc/include/asm/lmb.h [deleted file]
arch/powerpc/include/asm/memblock.h [new file with mode: 0644]
arch/powerpc/kernel/btext.c
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/crash_dump.c
arch/powerpc/kernel/dma-swiotlb.c
arch/powerpc/kernel/dma.c
arch/powerpc/kernel/fsl_booke_entry_mapping.S
arch/powerpc/kernel/machine_kexec.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/vdso.c
arch/powerpc/mm/40x_mmu.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/init_32.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/ppc_mmu_32.c
arch/powerpc/mm/stab.c
arch/powerpc/mm/tlb_nohash.c
arch/powerpc/platforms/85xx/corenet_ds.c
arch/powerpc/platforms/85xx/mpc8536_ds.c
arch/powerpc/platforms/85xx/mpc85xx_ds.c
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
arch/powerpc/platforms/cell/iommu.c
arch/powerpc/platforms/embedded6xx/wii.c
arch/powerpc/platforms/maple/setup.c
arch/powerpc/platforms/pasemi/iommu.c
arch/powerpc/platforms/powermac/setup.c
arch/powerpc/platforms/ps3/htab.c
arch/powerpc/platforms/ps3/mm.c
arch/powerpc/platforms/ps3/os-area.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/phyp_dump.c
arch/powerpc/sysdev/dart_iommu.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/micropatch.c
arch/sh/Kconfig
arch/sh/include/asm/lmb.h [deleted file]
arch/sh/include/asm/memblock.h [new file with mode: 0644]
arch/sh/kernel/machine_kexec.c
arch/sh/kernel/setup.c
arch/sh/mm/init.c
arch/sh/mm/numa.c
arch/sparc/Kconfig
arch/sparc/include/asm/lmb.h [deleted file]
arch/sparc/include/asm/memblock.h [new file with mode: 0644]
arch/sparc/kernel/mdesc.c
arch/sparc/kernel/prom_64.c
arch/sparc/mm/init_64.c
arch/x86/include/asm/x86_init.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/kprobes.c
arch/x86/kernel/mrst.c
arch/x86/kernel/quirks.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/x86_init.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/pci/i386.c
arch/x86/pci/mrst.c
crypto/ablkcipher.c
drivers/clocksource/cs5535-clockevt.c
drivers/edac/Kconfig
drivers/edac/mpc85xx_edac.c
drivers/gpio/cs5535-gpio.c
drivers/gpu/drm/i915/i915_gem.c
drivers/input/keyboard/Kconfig
drivers/input/mouse/Kconfig
drivers/input/mouse/synaptics.c
drivers/input/serio/Kconfig
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/w90p910_ts.c
drivers/misc/cs5535-mfgpt.c
drivers/mmc/host/sdhci-s3c.c
drivers/net/ibmveth.c
drivers/net/pcmcia/axnet_cs.c
drivers/net/r8169.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/hostap/hostap_pci.c
drivers/net/wireless/iwlwifi/iwl-sta.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/pci/setup-res.c
drivers/platform/x86/intel_scu_ipc.c
drivers/power/ds2782_battery.c
drivers/s390/block/dasd_devmap.c
drivers/s390/cio/chsc.c
drivers/spi/spi_mpc8xxx.c
drivers/vhost/net.c
fs/btrfs/ctree.c
fs/btrfs/ioctl.c
fs/ceph/auth_x.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/messenger.c
fs/ceph/osdmap.c
fs/dcache.c
fs/gfs2/bmap.c
fs/gfs2/dir.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/gfs2/quota.c
fs/gfs2/quota.h
fs/inode.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/mbcache.c
fs/nfs/dir.c
fs/nfs/internal.h
fs/ocfs2/aops.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/file.c
fs/ocfs2/file.h
fs/ocfs2/journal.c
fs/ocfs2/localalloc.c
fs/ocfs2/quota_global.c
fs/ocfs2/quota_local.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/ocfs2/xattr.c
fs/partitions/ibm.c
fs/quota/dquot.c
fs/ubifs/shrinker.c
fs/ubifs/ubifs.h
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/linux-2.6/xfs_sync.h
fs/xfs/linux-2.6/xfs_trace.h
fs/xfs/quota/xfs_qm.c
fs/xfs/xfs_mount.h
include/linux/fdtable.h
include/linux/i8042.h
include/linux/jbd2.h
include/linux/lmb.h [deleted file]
include/linux/memblock.h [new file with mode: 0644]
include/linux/mm.h
include/linux/pci.h
include/linux/syscalls.h
include/net/sock.h
ipc/sem.c
kernel/early_res.c
lib/Kconfig
lib/Makefile
lib/lmb.c [deleted file]
mm/Kconfig
mm/Makefile
mm/bootmem.c
mm/memblock.c [new file with mode: 0644]
mm/page_alloc.c
mm/page_cgroup.c
mm/vmscan.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap.c
net/bridge/br_device.c
net/bridge/br_forward.c
net/core/dev.c
net/core/neighbour.c
net/dsa/Kconfig
net/ipv4/ipmr.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv6/mip6.c
net/phonet/pep.c
net/sched/act_nat.c
net/xfrm/xfrm_policy.c
tools/perf/builtin-report.c
tools/perf/util/PERF-VERSION-GEN
tools/perf/util/callchain.c
tools/perf/util/callchain.h

diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index dd10b51b4e652570df9a6575ac2df654bbaca15d..5405f7aecefc334b7d16c0f2764a8757e845ad43 100644
@@ -32,8 +32,6 @@ DocBook/
        - directory with DocBook templates etc. for kernel documentation.
 HOWTO
        - the process and procedures of how to do Linux kernel development.
-IO-mapping.txt
-       - how to access I/O mapped memory from within device drivers.
 IPMI.txt
        - info on Linux Intelligent Platform Management Interface (IPMI) Driver.
 IRQ-affinity.txt
@@ -84,6 +82,8 @@ blockdev/
        - info on block devices & drivers
 btmrvl.txt
        - info on Marvell Bluetooth driver usage.
+bus-virt-phys-mapping.txt
+       - how to access I/O mapped memory from within device drivers.
 cachetlb.txt
        - describes the cache/TLB flushing interfaces Linux uses.
 cdrom/
@@ -168,6 +168,8 @@ initrd.txt
        - how to use the RAM disk as an initial/temporary root filesystem.
 input/
        - info on Linux input device support.
+io-mapping.txt
+       - description of io_mapping functions in linux/io-mapping.h
 io_ordering.txt
        - info on ordering I/O writes to memory-mapped addresses.
 ioctl/
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 82d6aeb5228ff62d71a64e04519c24f35a6116e0..4ddb58df081e360311e22cafe40273d3176de46d 100644
@@ -1265,7 +1265,7 @@ and is between 256 and 4096 characters. It is defined in the file
                        If there are multiple matching configurations changing
                        the same attribute, the last one is used.
 
-       lmb=debug       [KNL] Enable lmb debug messages.
+       memblock=debug  [KNL] Enable memblock debug messages.
 
        load_ramdisk=   [RAM] List of ramdisks to load from floppy
                        See Documentation/blockdev/ramdisk.txt.
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index a0162fa9456496662adb28898fcb44776e628414..7e79503ab89b5d395e634fda206e1e97d6262d04 100644
@@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v)
        int result;
 
        __asm__ __volatile__("@ atomic_add\n"
-"1:    ldrex   %0, [%2]\n"
-"      add     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
 }
@@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic_add_return\n"
-"1:    ldrex   %0, [%2]\n"
-"      add     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
 
@@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
        int result;
 
        __asm__ __volatile__("@ atomic_sub\n"
-"1:    ldrex   %0, [%2]\n"
-"      sub     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
 }
@@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic_sub_return\n"
-"1:    ldrex   %0, [%2]\n"
-"      sub     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
 
@@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 
        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
-               "ldrex  %1, [%2]\n"
+               "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
-               "teq    %1, %3\n"
-               "strexeq %0, %4, [%2]\n"
-                   : "=&r" (res), "=&r" (oldval)
+               "teq    %1, %4\n"
+               "strexeq %0, %5, [%3]\n"
+                   : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);
@@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
        unsigned long tmp, tmp2;
 
        __asm__ __volatile__("@ atomic_clear_mask\n"
-"1:    ldrex   %0, [%2]\n"
-"      bic     %0, %0, %3\n"
-"      strex   %1, %0, [%2]\n"
+"1:    ldrex   %0, [%3]\n"
+"      bic     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (tmp), "=&r" (tmp2)
+       : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
        : "r" (addr), "Ir" (mask)
        : "cc");
 }
@@ -249,7 +249,7 @@ static inline u64 atomic64_read(atomic64_t *v)
        __asm__ __volatile__("@ atomic64_read\n"
 "      ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
-       : "r" (&v->counter)
+       : "r" (&v->counter), "Qo" (v->counter)
        );
 
        return result;
@@ -260,11 +260,11 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
        u64 tmp;
 
        __asm__ __volatile__("@ atomic64_set\n"
-"1:    ldrexd  %0, %H0, [%1]\n"
-"      strexd  %0, %2, %H2, [%1]\n"
+"1:    ldrexd  %0, %H0, [%2]\n"
+"      strexd  %0, %3, %H3, [%2]\n"
 "      teq     %0, #0\n"
 "      bne     1b"
-       : "=&r" (tmp)
+       : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 }
@@ -275,13 +275,13 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
        unsigned long tmp;
 
        __asm__ __volatile__("@ atomic64_add\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      adds    %0, %0, %3\n"
-"      adc     %H0, %H0, %H3\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      adds    %0, %0, %4\n"
+"      adc     %H0, %H0, %H4\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 }
@@ -294,13 +294,13 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_add_return\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      adds    %0, %0, %3\n"
-"      adc     %H0, %H0, %H3\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      adds    %0, %0, %4\n"
+"      adc     %H0, %H0, %H4\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 
@@ -315,13 +315,13 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
        unsigned long tmp;
 
        __asm__ __volatile__("@ atomic64_sub\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      subs    %0, %0, %3\n"
-"      sbc     %H0, %H0, %H3\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      subs    %0, %0, %4\n"
+"      sbc     %H0, %H0, %H4\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 }
@@ -334,13 +334,13 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_sub_return\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      subs    %0, %0, %3\n"
-"      sbc     %H0, %H0, %H3\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      subs    %0, %0, %4\n"
+"      sbc     %H0, %H0, %H4\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
 
@@ -358,12 +358,12 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 
        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
-               "ldrexd         %1, %H1, [%2]\n"
+               "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
-               "teq            %1, %3\n"
-               "teqeq          %H1, %H3\n"
-               "strexdeq       %0, %4, %H4, [%2]"
-               : "=&r" (res), "=&r" (oldval)
+               "teq            %1, %4\n"
+               "teqeq          %H1, %H4\n"
+               "strexdeq       %0, %5, %H5, [%3]"
+               : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);
@@ -381,11 +381,11 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_xchg\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
-"      strexd  %1, %3, %H3, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
+"      strexd  %1, %4, %H4, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");
 
@@ -402,16 +402,16 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1:    ldrexd  %0, %H0, [%2]\n"
+"1:    ldrexd  %0, %H0, [%3]\n"
 "      subs    %0, %0, #1\n"
 "      sbc     %H0, %H0, #0\n"
 "      teq     %H0, #0\n"
 "      bmi     2f\n"
-"      strexd  %1, %0, %H0, [%2]\n"
+"      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b\n"
 "2:"
-       : "=&r" (result), "=&r" (tmp)
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");
 
@@ -429,18 +429,18 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
        smp_mb();
 
        __asm__ __volatile__("@ atomic64_add_unless\n"
-"1:    ldrexd  %0, %H0, [%3]\n"
-"      teq     %0, %4\n"
-"      teqeq   %H0, %H4\n"
+"1:    ldrexd  %0, %H0, [%4]\n"
+"      teq     %0, %5\n"
+"      teqeq   %H0, %H5\n"
 "      moveq   %1, #0\n"
 "      beq     2f\n"
-"      adds    %0, %0, %5\n"
-"      adc     %H0, %H0, %H5\n"
-"      strexd  %2, %0, %H0, [%3]\n"
+"      adds    %0, %0, %6\n"
+"      adc     %H0, %H0, %H6\n"
+"      strexd  %2, %0, %H0, [%4]\n"
 "      teq     %2, #0\n"
 "      bne     1b\n"
 "2:"
-       : "=&r" (val), "=&r" (ret), "=&r" (tmp)
+       : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");
 
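The whole fix above is in the constraints: listing v->counter itself as a "+Qo" read/write memory operand tells GCC that the asm actually touches that location, which passing only the address via "r" (&v->counter) never did, leaving the compiler free to cache or reorder around the access. A reduced sketch of the resulting pattern, distilled from the hunks above (requires ARMv6+ for ldrex/strex; the function name is illustrative):

    static inline void atomic_add_sketch(int i, atomic_t *v)
    {
            unsigned long tmp;
            int result;

            __asm__ __volatile__("@ atomic_add (post-patch operand layout)\n"
    "1:     ldrex   %0, [%3]\n"             /* load-exclusive the counter */
    "       add     %0, %0, %4\n"
    "       strex   %1, %0, [%3]\n"         /* %1 == 0 only if still exclusive */
    "       teq     %1, #0\n"
    "       bne     1b"                     /* retry if another CPU intervened */
            : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
            : "r" (&v->counter), "Ir" (i)
            : "cc");
    }
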
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7ee48e7f8f318a7b453e12849b60a6832bb85770..3fd7861de4d16c508ee424e52281de0dc39a4db7 100644
@@ -162,8 +162,6 @@ ENDPROC(__und_invalid)
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stmia   r5, {r0 - r4}
-
-       asm_trace_hardirqs_off
        .endm
 
        .align  5
@@ -204,7 +202,7 @@ __dabt_svc:
        @
        @ IRQs off again before pulling preserved data off the stack
        @
-       disable_irq
+       disable_irq_notrace
 
        @
        @ restore SPSR and restart the instruction
@@ -218,6 +216,9 @@ ENDPROC(__dabt_svc)
 __irq_svc:
        svc_entry
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_off
+#endif
 #ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
@@ -291,7 +292,7 @@ __und_svc:
        @
        @ IRQs off again before pulling preserved data off the stack
        @
-1:     disable_irq
+1:     disable_irq_notrace
 
        @
        @ restore SPSR and restart the instruction
@@ -327,7 +328,7 @@ __pabt_svc:
        @
        @ IRQs off again before pulling preserved data off the stack
        @
-       disable_irq
+       disable_irq_notrace
 
        @
        @ restore SPSR and restart the instruction
@@ -393,8 +394,6 @@ ENDPROC(__pabt_svc)
        @ Clear FP to mark the first stack frame
        @
        zero_fp
-
-       asm_trace_hardirqs_off
        .endm
 
        .macro  kuser_cmpxchg_check
@@ -465,9 +464,6 @@ __irq_usr:
  THUMB(        movne   r0, #0          )
  THUMB(        strne   r0, [r0]        )
 #endif
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_on
-#endif
 
        mov     why, #0
        b       ret_to_user
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index da1f94906a4e2a35881bde99510760180bdba9e2..8bccbfa693ffc359dc55d6004837d2a149e2c5cd 100644
@@ -583,13 +583,14 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
 {
        insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
+       long ppc = (long)p->addr + 8;
        union reg_pair fnr;
        int rd = (insn >> 12) & 0xf;
        int rn = (insn >> 16) & 0xf;
        int rm = insn & 0xf;
        long rdv;
-       long rnv  = regs->uregs[rn];
-       long rmv  = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
+       long rnv = (rn == 15) ? ppc : regs->uregs[rn];
+       long rmv = (rm == 15) ? ppc : regs->uregs[rm];
        long cpsr = regs->ARM_cpsr;
 
        fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index acf5e6fdb6dcbe991d7bed5d63c7a77f38c16e34..a4a9cc88bec73a525a9edd4bdc7db94cea370ce9 100644
@@ -351,17 +351,21 @@ EXPORT_SYMBOL(dump_fpu);
 
 /*
  * Shuffle the argument into the correct register before calling the
- * thread function.  r1 is the thread argument, r2 is the pointer to
- * the thread function, and r3 points to the exit function.
+ * thread function.  r4 is the thread argument, r5 is the pointer to
+ * the thread function, and r6 points to the exit function.
  */
 extern void kernel_thread_helper(void);
 asm(   ".pushsection .text\n"
 "      .align\n"
 "      .type   kernel_thread_helper, #function\n"
 "kernel_thread_helper:\n"
-"      mov     r0, r1\n"
-"      mov     lr, r3\n"
-"      mov     pc, r2\n"
+#ifdef CONFIG_TRACE_IRQFLAGS
+"      bl      trace_hardirqs_on\n"
+#endif
+"      msr     cpsr_c, r7\n"
+"      mov     r0, r4\n"
+"      mov     lr, r6\n"
+"      mov     pc, r5\n"
 "      .size   kernel_thread_helper, . - kernel_thread_helper\n"
 "      .popsection");
 
@@ -391,11 +395,12 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 
        memset(&regs, 0, sizeof(regs));
 
-       regs.ARM_r1 = (unsigned long)arg;
-       regs.ARM_r2 = (unsigned long)fn;
-       regs.ARM_r3 = (unsigned long)kernel_thread_exit;
+       regs.ARM_r4 = (unsigned long)arg;
+       regs.ARM_r5 = (unsigned long)fn;
+       regs.ARM_r6 = (unsigned long)kernel_thread_exit;
+       regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
        regs.ARM_pc = (unsigned long)kernel_thread_helper;
-       regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
+       regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
 
        return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
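Callers are untouched by this change; the thread argument, function, and exit handler now simply ride in r4-r6 (with the target CPSR in r7) so the helper can run trace_hardirqs_on() before switching modes and jumping to the thread function. A minimal usage sketch (my_worker and spawn_worker are hypothetical, not part of this patch):

    static int my_worker(void *arg)
    {
            /* executes in the new kernel thread; by the time we get here,
             * kernel_thread_helper has already restored CPSR from r7 */
            return 0;
    }

    static void spawn_worker(void)
    {
            pid_t pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);

            if (pid < 0)
                    printk(KERN_ERR "kernel_thread failed: %d\n", (int)pid);
    }
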
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index fe84b9021c7adee213fdb94c7639c7f77e2dfaa5..0a1318fc8e2bd6c29774bcf1ab8cc48f6a5cfa0c 100644
@@ -131,7 +131,7 @@ EXPORT_SYMBOL(clk_disable);
  */
 static unsigned long clk_mtu_get_rate(struct clk *clk)
 {
-       void __iomem *addr = __io_address(U8500_PRCMU_BASE)
+       void __iomem *addr = __io_address(UX500_PRCMU_BASE)
                + PRCM_TCR;
        u32 tcr = readl(addr);
        int mtu = (int) clk->data;
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index 6a3ac4539f164e23f7109896e8bab431ff9392bf..e9278f6d67aa7529f2408fd15fd5eab1bfecdb20 100644
@@ -21,6 +21,7 @@ static struct map_desc u5500_io_desc[] __initdata = {
        __IO_DEV_DESC(U5500_GPIO2_BASE, SZ_4K),
        __IO_DEV_DESC(U5500_GPIO3_BASE, SZ_4K),
        __IO_DEV_DESC(U5500_GPIO4_BASE, SZ_4K),
+       __IO_DEV_DESC(U5500_PRCMU_BASE, SZ_4K),
 };
 
 static struct platform_device *u5500_platform_devs[] __initdata = {
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9819869d2bc90954967a5a1ca0e636c883c09e2f..df4955885b21d412ede58df0a8c26a4fcb8ca0fb 100644
@@ -218,6 +218,9 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
        cache_id = readl(l2x0_base + L2X0_CACHE_ID);
        aux = readl(l2x0_base + L2X0_AUX_CTRL);
 
+       aux &= aux_mask;
+       aux |= aux_val;
+
        /* Determine the number of ways */
        switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
@@ -248,8 +251,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
        if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
 
                /* l2x0 controller is disabled */
-               aux &= aux_mask;
-               aux |= aux_val;
                writel(aux, l2x0_base + L2X0_AUX_CTRL);
 
                l2x0_inv_all();
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 8f10d24ae62540c5460eaf4b8e249c6ccd4ceac8..48cbdcb6bbd4288929f31bef94da3691b160a489 100644
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Sat May 1 10:36:42 2010
+# Last update: Mon Jul 12 21:10:14 2010
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -1994,7 +1994,7 @@ spark                     MACH_SPARK              SPARK                   2002
 benzina                        MACH_BENZINA            BENZINA                 2003
 blaze                  MACH_BLAZE              BLAZE                   2004
 linkstation_ls_hgl     MACH_LINKSTATION_LS_HGL LINKSTATION_LS_HGL      2005
-htckovsky              MACH_HTCVENUS           HTCVENUS                2006
+htckovsky              MACH_HTCKOVSKY          HTCKOVSKY               2006
 sony_prs505            MACH_SONY_PRS505        SONY_PRS505             2007
 hanlin_v3              MACH_HANLIN_V3          HANLIN_V3               2008
 sapphira               MACH_SAPPHIRA           SAPPHIRA                2009
@@ -2609,7 +2609,7 @@ fujitsu_tvstbsoc1 MACH_FUJITSU_TVSTBSOC1  FUJITSU_TVSTBSOC1       2621
 lexikon                        MACH_LEXIKON            LEXIKON                 2622
 mini2440v2             MACH_MINI2440V2         MINI2440V2              2623
 icontrol               MACH_ICONTROL           ICONTROL                2624
-sheevad                        MACH_SHEEVAD            SHEEVAD                 2625
+gplugd                 MACH_SHEEVAD            SHEEVAD                 2625
 qsd8x50a_st1_1         MACH_QSD8X50A_ST1_1     QSD8X50A_ST1_1          2626
 qsd8x50a_st1_5         MACH_QSD8X50A_ST1_5     QSD8X50A_ST1_5          2627
 bee                    MACH_BEE                BEE                     2628
@@ -2804,3 +2804,149 @@ teton_bga               MACH_TETON_BGA          TETON_BGA               2816
 snapper9g45            MACH_SNAPPER9G45        SNAPPER9G45             2817
 tam3517                        MACH_TAM3517            TAM3517                 2818
 pdc100                 MACH_PDC100             PDC100                  2819
+eukrea_cpuimx25sd      MACH_EUKREA_CPUIMX25    EUKREA_CPUIMX25         2820
+eukrea_cpuimx35sd      MACH_EUKREA_CPUIMX35    EUKREA_CPUIMX35         2821
+eukrea_cpuimx51sd      MACH_EUKREA_CPUIMX51SD  EUKREA_CPUIMX51SD       2822
+eukrea_cpuimx51                MACH_EUKREA_CPUIMX51    EUKREA_CPUIMX51         2823
+p565                   MACH_P565               P565                    2824
+acer_a4                        MACH_ACER_A4            ACER_A4                 2825
+davinci_dm368_bip      MACH_DAVINCI_DM368_BIP  DAVINCI_DM368_BIP       2826
+eshare                 MACH_ESHARE             ESHARE                  2827
+hw_omapl138_europa     MACH_HW_OMAPL138_EUROPA HW_OMAPL138_EUROPA      2828
+wlbargn                        MACH_WLBARGN            WLBARGN                 2829
+bm170                  MACH_BM170              BM170                   2830
+netspace_mini_v2       MACH_NETSPACE_MINI_V2   NETSPACE_MINI_V2        2831
+netspace_plug_v2       MACH_NETSPACE_PLUG_V2   NETSPACE_PLUG_V2        2832
+siemens_l1             MACH_SIEMENS_L1         SIEMENS_L1              2833
+elv_lcu1               MACH_ELV_LCU1           ELV_LCU1                2834
+mcu1                   MACH_MCU1               MCU1                    2835
+omap3_tao3530          MACH_OMAP3_TAO3530      OMAP3_TAO3530           2836
+omap3_pcutouch         MACH_OMAP3_PCUTOUCH     OMAP3_PCUTOUCH          2837
+smdkc210               MACH_SMDKC210           SMDKC210                2838
+omap3_braillo          MACH_OMAP3_BRAILLO      OMAP3_BRAILLO           2839
+spyplug                        MACH_SPYPLUG            SPYPLUG                 2840
+ginger                 MACH_GINGER             GINGER                  2841
+tny_t3530              MACH_TNY_T3530          TNY_T3530               2842
+pca102                 MACH_PCA102             PCA102                  2843
+spade                  MACH_SPADE              SPADE                   2844
+mxc25_topaz            MACH_MXC25_TOPAZ        MXC25_TOPAZ             2845
+t5325                  MACH_T5325              T5325                   2846
+gw2361                 MACH_GW2361             GW2361                  2847
+elog                   MACH_ELOG               ELOG                    2848
+income                 MACH_INCOME             INCOME                  2849
+bcm589x                        MACH_BCM589X            BCM589X                 2850
+etna                   MACH_ETNA               ETNA                    2851
+hawks                  MACH_HAWKS              HAWKS                   2852
+meson                  MACH_MESON              MESON                   2853
+xsbase255              MACH_XSBASE255          XSBASE255               2854
+pvm2030                        MACH_PVM2030            PVM2030                 2855
+mioa502                        MACH_MIOA502            MIOA502                 2856
+vvbox_sdorig2          MACH_VVBOX_SDORIG2      VVBOX_SDORIG2           2857
+vvbox_sdlite2          MACH_VVBOX_SDLITE2      VVBOX_SDLITE2           2858
+vvbox_sdpro4           MACH_VVBOX_SDPRO4       VVBOX_SDPRO4            2859
+htc_spv_m700           MACH_HTC_SPV_M700       HTC_SPV_M700            2860
+mx257sx                        MACH_MX257SX            MX257SX                 2861
+goni                   MACH_GONI               GONI                    2862
+msm8x55_svlte_ffa      MACH_MSM8X55_SVLTE_FFA  MSM8X55_SVLTE_FFA       2863
+msm8x55_svlte_surf     MACH_MSM8X55_SVLTE_SURF MSM8X55_SVLTE_SURF      2864
+quickstep              MACH_QUICKSTEP          QUICKSTEP               2865
+dmw96                  MACH_DMW96              DMW96                   2866
+hammerhead             MACH_HAMMERHEAD         HAMMERHEAD              2867
+trident                        MACH_TRIDENT            TRIDENT                 2868
+lightning              MACH_LIGHTNING          LIGHTNING               2869
+iconnect               MACH_ICONNECT           ICONNECT                2870
+autobot                        MACH_AUTOBOT            AUTOBOT                 2871
+coconut                        MACH_COCONUT            COCONUT                 2872
+durian                 MACH_DURIAN             DURIAN                  2873
+cayenne                        MACH_CAYENNE            CAYENNE                 2874
+fuji                   MACH_FUJI               FUJI                    2875
+synology_6282          MACH_SYNOLOGY_6282      SYNOLOGY_6282           2876
+em1sy                  MACH_EM1SY              EM1SY                   2877
+m502                   MACH_M502               M502                    2878
+matrix518              MACH_MATRIX518          MATRIX518               2879
+tiny_gurnard           MACH_TINY_GURNARD       TINY_GURNARD            2880
+spear1310              MACH_SPEAR1310          SPEAR1310               2881
+bv07                   MACH_BV07               BV07                    2882
+mxt_td61               MACH_MXT_TD61           MXT_TD61                2883
+openrd_ultimate                MACH_OPENRD_ULTIMATE    OPENRD_ULTIMATE         2884
+devixp                 MACH_DEVIXP             DEVIXP                  2885
+miccpt                 MACH_MICCPT             MICCPT                  2886
+mic256                 MACH_MIC256             MIC256                  2887
+as1167                 MACH_AS1167             AS1167                  2888
+omap3_ibiza            MACH_OMAP3_IBIZA        OMAP3_IBIZA             2889
+u5500                  MACH_U5500              U5500                   2890
+davinci_picto          MACH_DAVINCI_PICTO      DAVINCI_PICTO           2891
+mecha                  MACH_MECHA              MECHA                   2892
+bubba3                 MACH_BUBBA3             BUBBA3                  2893
+pupitre                        MACH_PUPITRE            PUPITRE                 2894
+tegra_harmony          MACH_TEGRA_HARMONY      TEGRA_HARMONY           2895
+tegra_vogue            MACH_TEGRA_VOGUE        TEGRA_VOGUE             2896
+tegra_e1165            MACH_TEGRA_E1165        TEGRA_E1165             2897
+simplenet              MACH_SIMPLENET          SIMPLENET               2898
+ec4350tbm              MACH_EC4350TBM          EC4350TBM               2899
+pec_tc                 MACH_PEC_TC             PEC_TC                  2900
+pec_hc2                        MACH_PEC_HC2            PEC_HC2                 2901
+esl_mobilis_a          MACH_ESL_MOBILIS_A      ESL_MOBILIS_A           2902
+esl_mobilis_b          MACH_ESL_MOBILIS_B      ESL_MOBILIS_B           2903
+esl_wave_a             MACH_ESL_WAVE_A         ESL_WAVE_A              2904
+esl_wave_b             MACH_ESL_WAVE_B         ESL_WAVE_B              2905
+unisense_mmm           MACH_UNISENSE_MMM       UNISENSE_MMM            2906
+blueshark              MACH_BLUESHARK          BLUESHARK               2907
+e10                    MACH_E10                E10                     2908
+app3k_robin            MACH_APP3K_ROBIN        APP3K_ROBIN             2909
+pov15hd                        MACH_POV15HD            POV15HD                 2910
+stella                 MACH_STELLA             STELLA                  2911
+linkstation_lschl      MACH_LINKSTATION_LSCHL  LINKSTATION_LSCHL       2913
+netwalker              MACH_NETWALKER          NETWALKER               2914
+acsx106                        MACH_ACSX106            ACSX106                 2915
+atlas5_c1              MACH_ATLAS5_C1          ATLAS5_C1               2916
+nsb3ast                        MACH_NSB3AST            NSB3AST                 2917
+gnet_slc               MACH_GNET_SLC           GNET_SLC                2918
+af4000                 MACH_AF4000             AF4000                  2919
+ark9431                        MACH_ARK9431            ARK9431                 2920
+fs_s5pc100             MACH_FS_S5PC100         FS_S5PC100              2921
+omap3505nova8          MACH_OMAP3505NOVA8      OMAP3505NOVA8           2922
+omap3621_edp1          MACH_OMAP3621_EDP1      OMAP3621_EDP1           2923
+oratisaes              MACH_ORATISAES          ORATISAES               2924
+smdkv310               MACH_SMDKV310           SMDKV310                2925
+siemens_l0             MACH_SIEMENS_L0         SIEMENS_L0              2926
+ventana                        MACH_VENTANA            VENTANA                 2927
+wm8505_7in_netbook     MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK      2928
+ec4350sdb              MACH_EC4350SDB          EC4350SDB               2929
+mimas                  MACH_MIMAS              MIMAS                   2930
+titan                  MACH_TITAN              TITAN                   2931
+craneboard             MACH_CRANEBOARD         CRANEBOARD              2932
+es2440                 MACH_ES2440             ES2440                  2933
+najay_a9263            MACH_NAJAY_A9263        NAJAY_A9263             2934
+htctornado             MACH_HTCTORNADO         HTCTORNADO              2935
+dimm_mx257             MACH_DIMM_MX257         DIMM_MX257              2936
+jigen301               MACH_JIGEN              JIGEN                   2937
+smdk6450               MACH_SMDK6450           SMDK6450                2938
+meno_qng               MACH_MENO_QNG           MENO_QNG                2939
+ns2416                 MACH_NS2416             NS2416                  2940
+rpc353                 MACH_RPC353             RPC353                  2941
+tq6410                 MACH_TQ6410             TQ6410                  2942
+sky6410                        MACH_SKY6410            SKY6410                 2943
+dynasty                        MACH_DYNASTY            DYNASTY                 2944
+vivo                   MACH_VIVO               VIVO                    2945
+bury_bl7582            MACH_BURY_BL7582        BURY_BL7582             2946
+bury_bps5270           MACH_BURY_BPS5270       BURY_BPS5270            2947
+basi                   MACH_BASI               BASI                    2948
+tn200                  MACH_TN200              TN200                   2949
+c2mmi                  MACH_C2MMI              C2MMI                   2950
+meson_6236m            MACH_MESON_6236M        MESON_6236M             2951
+meson_8626m            MACH_MESON_8626M        MESON_8626M             2952
+tube                   MACH_TUBE               TUBE                    2953
+messina                        MACH_MESSINA            MESSINA                 2954
+mx50_arm2              MACH_MX50_ARM2          MX50_ARM2               2955
+cetus9263              MACH_CETUS9263          CETUS9263               2956
+brownstone             MACH_BROWNSTONE         BROWNSTONE              2957
+vmx25                  MACH_VMX25              VMX25                   2958
+vmx51                  MACH_VMX51              VMX51                   2959
+abacus                 MACH_ABACUS             ABACUS                  2960
+cm4745                 MACH_CM4745             CM4745                  2961
+oratislink             MACH_ORATISLINK         ORATISLINK              2962
+davinci_dm365_dvr      MACH_DAVINCI_DM365_DVR  DAVINCI_DM365_DVR       2963
+netviz                 MACH_NETVIZ             NETVIZ                  2964
+flexibity              MACH_FLEXIBITY          FLEXIBITY               2965
+wlan_computer          MACH_WLAN_COMPUTER      WLAN_COMPUTER           2966
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 76818f926539bf444c8aa577d12dc1af1296d665..505a08592423dbd17b7168a2782247c2d401f52e 100644
@@ -5,7 +5,7 @@ mainmenu "Linux/Microblaze Kernel Configuration"
 
 config MICROBLAZE
        def_bool y
-       select HAVE_LMB
+       select HAVE_MEMBLOCK
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/microblaze/include/asm/lmb.h b/arch/microblaze/include/asm/memblock.h
similarity index 57%
rename from arch/microblaze/include/asm/lmb.h
rename to arch/microblaze/include/asm/memblock.h
index a0a0a929c293b8fd4df06d3e9563b718a08d2725..f9c2fa331d2ad92e675aad399befb989cc575fbc 100644
@@ -6,12 +6,12 @@
  * for more details.
  */
 
-#ifndef _ASM_MICROBLAZE_LMB_H
-#define _ASM_MICROBLAZE_LMB_H
+#ifndef _ASM_MICROBLAZE_MEMBLOCK_H
+#define _ASM_MICROBLAZE_MEMBLOCK_H
 
-/* LMB limit is OFF */
-#define LMB_REAL_LIMIT 0xFFFFFFFF
+/* MEMBLOCK limit is OFF */
+#define MEMBLOCK_REAL_LIMIT    0xFFFFFFFF
 
-#endif /* _ASM_MICROBLAZE_LMB_H */
+#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
 
 
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index a15ef6d67ca92e037628f1c9808db4f5cd55e5ad..427b13b4740f822c95362f10a2510359e75fbd0a 100644
@@ -29,7 +29,7 @@
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
 #include <linux/irq.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/page.h>
@@ -49,12 +49,12 @@ void __init early_init_dt_scan_chosen_arch(unsigned long node)
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-       lmb_add(base, size);
+       memblock_add(base, size);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-       return lmb_alloc(size, align);
+       return memblock_alloc(size, align);
 }
 
 #ifdef CONFIG_EARLY_PRINTK
@@ -104,8 +104,8 @@ void __init early_init_devtree(void *params)
         */
        of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
 
-       /* Scan memory nodes and rebuild LMBs */
-       lmb_init();
+       /* Scan memory nodes and rebuild MEMBLOCKs */
+       memblock_init();
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 
@@ -113,9 +113,9 @@ void __init early_init_devtree(void *params)
        strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
        parse_early_param();
 
-       lmb_analyze();
+       memblock_analyze();
 
-       pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());
+       pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());
 
        pr_debug(" <- early_init_devtree()\n");
 }
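The conversion is mechanical: every lmb_* call and lmb.* field becomes memblock_* / memblock.*, with unchanged semantics. Pulled together from the hunks in this commit, the early-boot sequence reads roughly like this (base, size, kstart, and ksize stand in for real values):

    memblock_init();                        /* reset the region lists */
    memblock_add(base, size);               /* register a bank of RAM */
    memblock_reserve(kstart, ksize);        /* carve out the kernel image */
    memblock_analyze();                     /* finalize totals */
    ptr = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));   /* early allocation */
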
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index cca3579d4268d07aab1910a463180f9dd271a4e6..db5934989926d583d837ec51c752ac90eccba6ff 100644
@@ -10,7 +10,7 @@
 #include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/mm.h> /* mem_init */
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
@@ -76,10 +76,10 @@ void __init setup_memory(void)
        u32 kernel_align_start, kernel_align_size;
 
        /* Find main memory where is the kernel */
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               memory_start = (u32) lmb.memory.region[i].base;
-               memory_end = (u32) lmb.memory.region[i].base
-                               + (u32) lmb.memory.region[i].size;
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               memory_start = (u32) memblock.memory.region[i].base;
+               memory_end = (u32) memblock.memory.region[i].base
+                               + (u32) memblock.memory.region[i].size;
                if ((memory_start <= (u32)_text) &&
                                        ((u32)_text <= memory_end)) {
                        memory_size = memory_end - memory_start;
@@ -100,7 +100,7 @@ void __init setup_memory(void)
        kernel_align_start = PAGE_DOWN((u32)_text);
        /* ALIGN can be remove because _end in vmlinux.lds.S is align */
        kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
-       lmb_reserve(kernel_align_start, kernel_align_size);
+       memblock_reserve(kernel_align_start, kernel_align_size);
        printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
                __func__, kernel_align_start, kernel_align_start
                        + kernel_align_size, kernel_align_size);
@@ -141,18 +141,18 @@ void __init setup_memory(void)
        map_size = init_bootmem_node(&contig_page_data,
                PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
 #endif
-       lmb_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
+       memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
 
        /* free bootmem is whole main memory */
        free_bootmem(memory_start, memory_size);
 
        /* reserve allocate blocks */
-       for (i = 0; i < lmb.reserved.cnt; i++) {
+       for (i = 0; i < memblock.reserved.cnt; i++) {
                pr_debug("reserved %d - 0x%08x-0x%08x\n", i,
-                       (u32) lmb.reserved.region[i].base,
-                       (u32) lmb_size_bytes(&lmb.reserved, i));
-               reserve_bootmem(lmb.reserved.region[i].base,
-                       lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
+                       (u32) memblock.reserved.region[i].base,
+                       (u32) memblock_size_bytes(&memblock.reserved, i));
+               reserve_bootmem(memblock.reserved.region[i].base,
+                       memblock_size_bytes(&memblock.reserved, i) - 1, BOOTMEM_DEFAULT);
        }
 #ifdef CONFIG_MMU
        init_bootmem_done = 1;
@@ -235,7 +235,7 @@ static void mm_cmdline_setup(void)
                if (maxmem && memory_size > maxmem) {
                        memory_size = maxmem;
                        memory_end = memory_start + memory_size;
-                       lmb.memory.region[0].size = memory_size;
+                       memblock.memory.region[0].size = memory_size;
                }
        }
 }
@@ -273,19 +273,19 @@ asmlinkage void __init mmu_init(void)
 {
        unsigned int kstart, ksize;
 
-       if (!lmb.reserved.cnt) {
+       if (!memblock.reserved.cnt) {
                printk(KERN_EMERG "Error memory count\n");
                machine_restart(NULL);
        }
 
-       if ((u32) lmb.memory.region[0].size < 0x1000000) {
+       if ((u32) memblock.memory.region[0].size < 0x1000000) {
                printk(KERN_EMERG "Memory must be greater than 16MB\n");
                machine_restart(NULL);
        }
        /* Find main memory where the kernel is */
-       memory_start = (u32) lmb.memory.region[0].base;
-       memory_end = (u32) lmb.memory.region[0].base +
-                               (u32) lmb.memory.region[0].size;
+       memory_start = (u32) memblock.memory.region[0].base;
+       memory_end = (u32) memblock.memory.region[0].base +
+                               (u32) memblock.memory.region[0].size;
        memory_size = memory_end - memory_start;
 
        mm_cmdline_setup(); /* FIXME parse args from command line - not used */
@@ -297,7 +297,7 @@ asmlinkage void __init mmu_init(void)
        kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
        /* kernel size */
        ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
-       lmb_reserve(kstart, ksize);
+       memblock_reserve(kstart, ksize);
 
 #if defined(CONFIG_BLK_DEV_INITRD)
        /* Remove the init RAM disk from the available memory. */
@@ -335,7 +335,7 @@ void __init *early_get_page(void)
                 * Mem start + 32MB -> here is limit
                 * because of mem mapping from head.S
                 */
-               p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+               p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
                                        memory_start + 0x2000000));
        }
        return p;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6506bf4fbff13d82d1c66b629638efe435882b3d..2031a2846865e51bb3889742fe3fc60125532a4c 100644
@@ -132,7 +132,7 @@ config PPC
        select HAVE_ARCH_KGDB
        select HAVE_KRETPROBES
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_LMB
+       select HAVE_MEMBLOCK
        select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
        select USE_GENERIC_SMP_HELPERS if SMP
diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h
index 98324c5a82867cfa5ace5179de635220ab5d01a4..9a846efe6382026d3f093988ac1b64be76894809 100644
@@ -12,7 +12,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/types.h>
 #include <asm/page.h>
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
index 0835eb977ba9c31b5536e3e714545d2528f7a876..e50323fe941f96d92e68ce246dad172fa58507c3 100644
@@ -6,6 +6,30 @@
 #include <linux/errno.h>
 #include <linux/of.h>
 
+/*
+ * SPI Parameter RAM common to QE and CPM.
+ */
+struct spi_pram {
+       __be16  rbase;  /* Rx Buffer descriptor base address */
+       __be16  tbase;  /* Tx Buffer descriptor base address */
+       u8      rfcr;   /* Rx function code */
+       u8      tfcr;   /* Tx function code */
+       __be16  mrblr;  /* Max receive buffer length */
+       __be32  rstate; /* Internal */
+       __be32  rdp;    /* Internal */
+       __be16  rbptr;  /* Internal */
+       __be16  rbc;    /* Internal */
+       __be32  rxtmp;  /* Internal */
+       __be32  tstate; /* Internal */
+       __be32  tdp;    /* Internal */
+       __be16  tbptr;  /* Internal */
+       __be16  tbc;    /* Internal */
+       __be32  txtmp;  /* Internal */
+       __be32  res;    /* Tx temp. */
+       __be16  rpbase; /* Relocation pointer (CPM1 only) */
+       __be16  res1;   /* Reserved */
+};
+
 /*
  * USB Controller pram common to QE and CPM.
  */
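Since the new spi_pram fields are big-endian (__be16/__be32), drivers are expected to go through the powerpc I/O accessors rather than plain stores. A hedged sketch of how a driver might program it (the function, values, and field choices are illustrative; out_be16/out_8 and CPMFCR_EB are the usual powerpc/CPM helpers):

    static void spi_pram_setup(struct spi_pram __iomem *pram,
                               u16 rbase, u16 tbase)
    {
            out_be16(&pram->rbase, rbase);  /* Rx buffer descriptor ring base */
            out_be16(&pram->tbase, tbase);  /* Tx buffer descriptor ring base */
            out_8(&pram->rfcr, CPMFCR_EB);  /* big-endian function codes */
            out_8(&pram->tfcr, CPMFCR_EB);
            out_be16(&pram->mrblr, 0x100);  /* max receive buffer length */
    }
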
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 81b01192f4408f01c7da9553a49c5fb01ee31c10..bd07650dca56b0489b5518ccaaaa6eaf5f6de73a 100644
@@ -17,6 +17,7 @@
 #ifndef __CPM1__
 #define __CPM1__
 
+#include <linux/init.h>
 #include <asm/8xx_immap.h>
 #include <asm/ptrace.h>
 #include <asm/cpm.h>
@@ -54,7 +55,7 @@ extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
 
 extern void cpm_setbrg(uint brg, uint rate);
 
-extern void cpm_load_patch(cpm8xx_t *cp);
+extern void __init cpm_load_patch(cpm8xx_t *cp);
 
 extern void cpm_reset(void);
 
diff --git a/arch/powerpc/include/asm/lmb.h b/arch/powerpc/include/asm/lmb.h
deleted file mode 100644
index 6f5fdf0..0000000
--- a/arch/powerpc/include/asm/lmb.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_POWERPC_LMB_H
-#define _ASM_POWERPC_LMB_H
-
-#include <asm/udbg.h>
-
-#define LMB_DBG(fmt...) udbg_printf(fmt)
-
-#ifdef CONFIG_PPC32
-extern phys_addr_t lowmem_end_addr;
-#define LMB_REAL_LIMIT lowmem_end_addr
-#else
-#define LMB_REAL_LIMIT 0
-#endif
-
-#endif /* _ASM_POWERPC_LMB_H */
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h
new file mode 100644
index 0000000..3c29728
--- /dev/null
+++ b/arch/powerpc/include/asm/memblock.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_POWERPC_MEMBLOCK_H
+#define _ASM_POWERPC_MEMBLOCK_H
+
+#include <asm/udbg.h>
+
+#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
+
+#ifdef CONFIG_PPC32
+extern phys_addr_t lowmem_end_addr;
+#define MEMBLOCK_REAL_LIMIT    lowmem_end_addr
+#else
+#define MEMBLOCK_REAL_LIMIT    0
+#endif
+
+#endif /* _ASM_POWERPC_MEMBLOCK_H */
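MEMBLOCK_REAL_LIMIT takes over the role of LMB_REAL_LIMIT: it is the implicit upper bound for plain memblock_alloc(), while memblock_alloc_base() takes an explicit ceiling. In sketch form (both calls appear in other hunks of this commit; limit stands in for a real bound):

    /* implicitly bounded by MEMBLOCK_REAL_LIMIT (top of lowmem on ppc32) */
    p = __va(memblock_alloc(size, PAGE_SIZE));

    /* explicit upper bound instead */
    p = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
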
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 26e58630ed7bf3d2f618abae3f6a74d7f4350e4b..625942ae55856f8788011bc88f35781b3f178a1b 100644
@@ -7,7 +7,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/sections.h>
 #include <asm/prom.h>
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 29df48f2b61a45d27f33301fe350439a94e384f4..417f7b05a9cebc6e10acfaf41eec88d17a2f7e89 100644
@@ -24,7 +24,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 5fb667a60894dd6238b51db9c13b1caf93fc0253..40f524643ba6f76ed7aa3065ebc053e96f1ad27f 100644
@@ -13,7 +13,7 @@
 
 #include <linux/crash_dump.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/code-patching.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -33,7 +33,7 @@ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
 #ifndef CONFIG_RELOCATABLE
 void __init reserve_kdump_trampoline(void)
 {
-       lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+       memblock_reserve(0, KDUMP_RESERVE_LIMIT);
 }
 
 static void __init create_trampoline(unsigned long addr)
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index e7fe218b86978e52b2ff50f9d34c6efbd4b64790..02f724f367533ee60f6512a7453cb0f0e3024fbc 100644
@@ -71,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
        sd->max_direct_dma_addr = 0;
 
        /* May need to bounce if the device can't address all of DRAM */
-       if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
+       if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
                set_dma_ops(dev, &swiotlb_dma_ops);
 
        return NOTIFY_DONE;
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8d1de6f31d5a6befad3d93715d4ddf338b027b55..84d6367ec0030c6611782902321d39871b9fe278 100644
@@ -9,7 +9,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/gfp.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/bug.h>
 #include <asm/abs_addr.h>
 
@@ -89,7 +89,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
-       return mask >= (lmb_end_of_DRAM() - 1);
+       return mask >= (memblock_end_of_DRAM() - 1);
 #else
        return 1;
 #endif
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index beb4d78a23049603a7806b1ad2bdbd0c0787226b..a92c79be2728e97bb54433e6c7d49b5917a2dcde 100644
@@ -205,8 +205,7 @@ next_tlb_setup:
        bdnz+   next_tlb_setup
 
 /* 7. Jump to our 1:1 mapping */
-       li      r6, 0
-
+       mr      r6, r25
 #else
        #error You need to specify the mapping or not use this at all.
 #endif
@@ -217,7 +216,6 @@ next_tlb_setup:
 1:     mflr    r9
        rlwimi  r6,r9,0,20,31
        addi    r6,r6,(2f - 1b)
-       add     r6, r6, r25
        mtspr   SPRN_SRR0,r6
        mtspr   SPRN_SRR1,r7
        rfi                             /* start execution out of TLB1[0] entry */
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index bb3d893a8353e99ea751c48a5f369e3f33b74413..89f005116aac941aa28b2792fc212ba95e2122d7 100644
@@ -12,7 +12,7 @@
 #include <linux/kexec.h>
 #include <linux/reboot.h>
 #include <linux/threads.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <asm/machdep.h>
 #include <asm/prom.h>
@@ -66,11 +66,11 @@ void __init reserve_crashkernel(void)
        unsigned long long crash_size, crash_base;
        int ret;
 
-       /* this is necessary because of lmb_phys_mem_size() */
-       lmb_analyze();
+       /* this is necessary because of memblock_phys_mem_size() */
+       memblock_analyze();
 
        /* use common parsing */
-       ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+       ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                        &crash_size, &crash_base);
        if (ret == 0 && crash_size > 0) {
                crashk_res.start = crash_base;
@@ -133,9 +133,9 @@ void __init reserve_crashkernel(void)
                        "for crashkernel (System RAM: %ldMB)\n",
                        (unsigned long)(crash_size >> 20),
                        (unsigned long)(crashk_res.start >> 20),
-                       (unsigned long)(lmb_phys_mem_size() >> 20));
+                       (unsigned long)(memblock_phys_mem_size() >> 20));
 
-       lmb_reserve(crashk_res.start, crash_size);
+       memblock_reserve(crashk_res.start, crash_size);
 }
 
 int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f88acf0218dbfdbe30dc0f65f20326ae5a38ee7d..139a773853f408dddefffc96b1c6f41debdd421d 100644
@@ -9,7 +9,7 @@
 
 #include <linux/threads.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/firmware.h>
 #include <asm/lppaca.h>
@@ -117,7 +117,7 @@ void __init allocate_pacas(void)
         * the first segment. On iSeries they must be within the area mapped
         * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
         */
-       limit = min(0x10000000ULL, lmb.rmo_size);
+       limit = min(0x10000000ULL, memblock.rmo_size);
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                limit = min(limit, HvPagesToMap * HVPAGESIZE);
 
@@ -128,7 +128,7 @@ void __init allocate_pacas(void)
 
        paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
 
-       paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit));
+       paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
        memset(paca, 0, paca_size);
 
        printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
@@ -148,7 +148,7 @@ void __init free_unused_pacas(void)
        if (new_size >= paca_size)
                return;
 
-       lmb_free(__pa(paca) + new_size, paca_size - new_size);
+       memblock_free(__pa(paca) + new_size, paca_size - new_size);
 
        printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
                paca_size - new_size);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05131d634e7395c6ba4d9599a4f7b68734fe174d..9d3953983fb74759a2ab2bfbb2a5f7dd316e62ca 100644
@@ -31,7 +31,7 @@
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
 #include <linux/irq.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -98,7 +98,7 @@ static void __init move_device_tree(void)
 
        if ((memory_limit && (start + size) > memory_limit) ||
                        overlaps_crashkernel(start, size)) {
-               p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
+               p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
                memcpy(p, initial_boot_params, size);
                initial_boot_params = (struct boot_param_header *)p;
                DBG("Moved device tree to 0x%p\n", p);
@@ -411,13 +411,13 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 {
        __be32 *dm, *ls, *usm;
        unsigned long l, n, flags;
-       u64 base, size, lmb_size;
+       u64 base, size, memblock_size;
        unsigned int is_kexec_kdump = 0, rngs;
 
-       ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+       ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l);
        if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
                return 0;
-       lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
+       memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);
 
        dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
        if (dm == NULL || l < sizeof(__be32))
@@ -442,11 +442,11 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
                   or if the block is not assigned to this partition (0x8) */
                if ((flags & 0x80) || !(flags & 0x8))
                        continue;
-               size = lmb_size;
+               size = memblock_size;
                rngs = 1;
                if (is_kexec_kdump) {
                        /*
-                        * For each lmb in ibm,dynamic-memory, a corresponding
+                        * For each memblock in ibm,dynamic-memory, a corresponding
                         * entry in linux,drconf-usable-memory property contains
                         * a counter 'p' followed by 'p' (base, size) duple.
                         * Now read the counter from
@@ -469,10 +469,10 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
                                if ((base + size) > 0x80000000ul)
                                        size = 0x80000000ul - base;
                        }
-                       lmb_add(base, size);
+                       memblock_add(base, size);
                } while (--rngs);
        }
-       lmb_dump_all();
+       memblock_dump_all();
        return 0;
 }
 #else
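
The comment in the hunk above is terse about the linux,drconf-usable-memory layout: for each entry in ibm,dynamic-memory it carries a counter 'p' followed by 'p' (base, size) tuples naming the parts of that block the kdump kernel may use. A hypothetical sketch of the read loop, reusing the flat-device-tree cursor helpers from the code above (illustration only, not the committed body):

    /* linux,drconf-usable-memory, one record per ibm,dynamic-memory entry:
     *   <p> <base0 size0> <base1 size1> ... <base(p-1) size(p-1)>
     */
    rngs = dt_mem_next_cell(dt_root_size_cells, &usm);    /* the counter 'p' */
    while (rngs--) {
            base = dt_mem_next_cell(dt_root_addr_cells, &usm);
            size = dt_mem_next_cell(dt_root_size_cells, &usm);
            memblock_add(base, size);
    }
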
@@ -501,14 +501,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
        }
 #endif
 
-       lmb_add(base, size);
+       memblock_add(base, size);
 
        memstart_addr = min((u64)memstart_addr, base);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-       return lmb_alloc(size, align);
+       return memblock_alloc(size, align);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -534,12 +534,12 @@ static void __init early_reserve_mem(void)
        /* before we do anything, let's reserve the dt blob */
        self_base = __pa((unsigned long)initial_boot_params);
        self_size = initial_boot_params->totalsize;
-       lmb_reserve(self_base, self_size);
+       memblock_reserve(self_base, self_size);
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /* then reserve the initrd, if any */
        if (initrd_start && (initrd_end > initrd_start))
-               lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
+               memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_PPC32
@@ -560,7 +560,7 @@ static void __init early_reserve_mem(void)
                        if (base_32 == self_base && size_32 == self_size)
                                continue;
                        DBG("reserving: %x -> %x\n", base_32, size_32);
-                       lmb_reserve(base_32, size_32);
+                       memblock_reserve(base_32, size_32);
                }
                return;
        }
@@ -571,7 +571,7 @@ static void __init early_reserve_mem(void)
                if (size == 0)
                        break;
                DBG("reserving: %llx -> %llx\n", base, size);
-               lmb_reserve(base, size);
+               memblock_reserve(base, size);
        }
 }
 
@@ -594,7 +594,7 @@ static inline unsigned long phyp_dump_calculate_reserve_size(void)
                return phyp_dump_info->reserve_bootvar;
 
        /* divide by 20 to get 5% of value */
-       tmp = lmb_end_of_DRAM();
+       tmp = memblock_end_of_DRAM();
        do_div(tmp, 20);
 
        /* round it down in multiples of 256 */
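
In user-space terms, the calculation above takes 5% of end-of-DRAM by dividing by 20, then rounds down to a 256MB boundary. A standalone sketch of the arithmetic (the 256MB granularity is inferred from the comment, since the masking line falls outside this hunk):

    #include <stdio.h>
    #include <stdint.h>

    /* 5% of DRAM, rounded down to a multiple of 256MB (assumed unit). */
    static uint64_t reserve_size(uint64_t end_of_dram)
    {
            uint64_t tmp = end_of_dram / 20;        /* 5% */
            return tmp & ~((256ULL << 20) - 1);     /* 256MB multiples */
    }

    int main(void)
    {
            /* 64GB of DRAM: 5% is ~3276MB, which rounds down to 3072MB. */
            printf("%llu MB\n",
                   (unsigned long long)(reserve_size(64ULL << 30) >> 20));
            return 0;
    }
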
@@ -633,11 +633,11 @@ static void __init phyp_dump_reserve_mem(void)
        if (phyp_dump_info->phyp_dump_is_active) {
                /* Reserve *everything* above RMR.Area freed by userland tools */
                base = variable_reserve_size;
-               size = lmb_end_of_DRAM() - base;
+               size = memblock_end_of_DRAM() - base;
 
                /* XXX crashed_ram_end is wrong, since it may be beyond
                 * the memory_limit, it will need to be adjusted. */
-               lmb_reserve(base, size);
+               memblock_reserve(base, size);
 
                phyp_dump_info->init_reserve_start = base;
                phyp_dump_info->init_reserve_size = size;
@@ -645,8 +645,8 @@ static void __init phyp_dump_reserve_mem(void)
                size = phyp_dump_info->cpu_state_size +
                        phyp_dump_info->hpte_region_size +
                        variable_reserve_size;
-               base = lmb_end_of_DRAM() - size;
-               lmb_reserve(base, size);
+               base = memblock_end_of_DRAM() - size;
+               memblock_reserve(base, size);
                phyp_dump_info->init_reserve_start = base;
                phyp_dump_info->init_reserve_size = size;
        }
@@ -681,8 +681,8 @@ void __init early_init_devtree(void *params)
         */
        of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
 
-       /* Scan memory nodes and rebuild LMBs */
-       lmb_init();
+       /* Scan memory nodes and rebuild MEMBLOCKs */
+       memblock_init();
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
@@ -690,11 +690,11 @@ void __init early_init_devtree(void *params)
        strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
        parse_early_param();
 
-       /* Reserve LMB regions used by kernel, initrd, dt, etc... */
-       lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+       /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
+       memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
        /* If relocatable, reserve first 32k for interrupt vectors etc. */
        if (PHYSICAL_START > MEMORY_START)
-               lmb_reserve(MEMORY_START, 0x8000);
+               memblock_reserve(MEMORY_START, 0x8000);
        reserve_kdump_trampoline();
        reserve_crashkernel();
        early_reserve_mem();
@@ -706,17 +706,17 @@ void __init early_init_devtree(void *params)
 
                /* Ensure that total memory size is page-aligned, because
                 * otherwise mark_bootmem() gets upset. */
-               lmb_analyze();
-               memsize = lmb_phys_mem_size();
+               memblock_analyze();
+               memsize = memblock_phys_mem_size();
                if ((memsize & PAGE_MASK) != memsize)
                        limit = memsize & PAGE_MASK;
        }
-       lmb_enforce_memory_limit(limit);
+       memblock_enforce_memory_limit(limit);
 
-       lmb_analyze();
-       lmb_dump_all();
+       memblock_analyze();
+       memblock_dump_all();
 
-       DBG("Phys. mem: %llx\n", lmb_phys_mem_size());
+       DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
 
        /* We may need to relocate the flat tree, do it now.
         * FIXME .. and the initrd too? */
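
Taken together, the early_init_devtree() hunks preserve the existing boot order around the rename; condensed to the memblock-relevant calls only (not the literal function body):

    memblock_init();                         /* start with empty region lists */
    of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);  /* memblock_add()s */
    memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
    early_reserve_mem();                     /* dt blob, initrd, reserve map  */
    memblock_enforce_memory_limit(limit);    /* mem= / page-alignment limit   */
    memblock_analyze();                      /* recompute region totals       */
    memblock_dump_all();                     /* debug dump of both lists      */
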
index 0e1ec6f746f653d568e4b00999ec4453c0e4bc5d..d0516dbee7621a2374f5d34131a74673d17e8a24 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/prom.h>
@@ -934,11 +934,11 @@ void __init rtas_initialize(void)
         */
 #ifdef CONFIG_PPC64
        if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
-               rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
+               rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX);
                ibm_suspend_me_token = rtas_token("ibm,suspend-me");
        }
 #endif
-       rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
+       rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
 
 #ifdef CONFIG_RTAS_ERROR_LOGGING
        rtas_last_error_token = rtas_token("rtas-last-error");
index 5e4d852f640c7f5c6535b9e420ae74da94e942cb..b7e6c7e193ae9141cc0aa6b1c7beb3394240c937 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/serial_8250.h>
 #include <linux/debugfs.h>
 #include <linux/percpu.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of_platform.h>
 #include <asm/io.h>
 #include <asm/paca.h>
index 7d84b210f1680de8a41fb23d0ad09a7e19ce905b..a10ffc85ada77cfa802f371725a8d1b86bc7475b 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/console.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -246,12 +246,12 @@ static void __init irqstack_early_init(void)
        unsigned int i;
 
        /* interrupt stacks must be in lowmem, we get that for free on ppc32
-        * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+        * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
        for_each_possible_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                hardirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
        }
 }
 
@@ -261,15 +261,15 @@ static void __init exc_lvl_early_init(void)
        unsigned int i;
 
        /* interrupt stacks must be in lowmem, we get that for free on ppc32
-        * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+        * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
        for_each_possible_cpu(i) {
                critirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
                dbgirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                mcheckirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #endif
        }
 }
index 643dcac40fcbc56c8cc3102a74e76a6aae4381eb..d135f93cb0f63b5d6f52350c4252a94b103f6018 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)
  * the CPU that ignores the top 2 bits of the address in real
  * mode so we can access kernel globals normally provided we
  * only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to setup out LMB
+ * some early parsing of the device-tree to set up our MEMBLOCK
  * data structures, and allocate & initialize the hash table
  * and segment tables so we can start running with translation
  * enabled.
@@ -404,7 +404,7 @@ void __init setup_system(void)
 
        printk("-----------------------------------------------------\n");
        printk("ppc64_pft_size                = 0x%llx\n", ppc64_pft_size);
-       printk("physicalMemorySize            = 0x%llx\n", lmb_phys_mem_size());
+       printk("physicalMemorySize            = 0x%llx\n", memblock_phys_mem_size());
        if (ppc64_caches.dline_size != 0x80)
                printk("ppc64_caches.dcache_line_size = 0x%x\n",
                       ppc64_caches.dline_size);
@@ -443,10 +443,10 @@ static void __init irqstack_early_init(void)
         */
        for_each_possible_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc_base(THREAD_SIZE,
+                       __va(memblock_alloc_base(THREAD_SIZE,
                                            THREAD_SIZE, limit));
                hardirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc_base(THREAD_SIZE,
+                       __va(memblock_alloc_base(THREAD_SIZE,
                                            THREAD_SIZE, limit));
        }
 }
@@ -458,11 +458,11 @@ static void __init exc_lvl_early_init(void)
 
        for_each_possible_cpu(i) {
                critirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                dbgirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                mcheckirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
        }
 }
 #else
@@ -487,11 +487,11 @@ static void __init emergency_stack_init(void)
         * bringup, we need to get at them in real mode. This means they
         * must also be within the RMO region.
         */
-       limit = min(slb0_limit(), lmb.rmo_size);
+       limit = min(slb0_limit(), memblock.rmo_size);
 
        for_each_possible_cpu(i) {
                unsigned long sp;
-               sp  = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+               sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
                sp += THREAD_SIZE;
                paca[i].emergency_sp = __va(sp);
        }
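
The emergency-stack hunk shows a pattern that recurs throughout the series: allocate below a physical limit so the memory stays addressable in real mode, then hand back a virtual address. A minimal sketch of the idiom, assuming the 2010-era semantics in which memblock_alloc_base() panics rather than returning failure (numa.c below uses the non-panicking __memblock_alloc_base() instead):

    /* Allocate size bytes, align-aligned, physically below limit, and
     * return the kernel virtual address of the new block.
     */
    static void *alloc_below(unsigned long size, unsigned long align,
                             unsigned long limit)
    {
            return __va(memblock_alloc_base(size, align, limit));
    }
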
index d84d19224a95d1338a11fd389ad47acfbfb7f248..13002fe206e7ce16ed79f5ad31e5aec58bc329cb 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/elf.h>
 #include <linux/security.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -734,7 +734,7 @@ static int __init vdso_init(void)
        vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100;
        if (firmware_has_feature(FW_FEATURE_LPAR))
                vdso_data->platform |= 1;
-       vdso_data->physicalMemorySize = lmb_phys_mem_size();
+       vdso_data->physicalMemorySize = memblock_phys_mem_size();
        vdso_data->dcache_size = ppc64_caches.dsize;
        vdso_data->dcache_line_size = ppc64_caches.dline_size;
        vdso_data->icache_size = ppc64_caches.isize;
index 65abfcfaaa9e72be187902357b3d29577fd06d28..1dc2fa5ce1bda72b05df8db2e4d249cfcc8642ab 100644 (file)
@@ -135,7 +135,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
        /* If the size of RAM is not an exact power of two, we may not
         * have covered RAM in its entirety with 16 and 4 MiB
         * pages. Consequently, restrict the top end of RAM currently
-        * allocable so that calls to the LMB to allocate PTEs for "tail"
+        * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
         * coverage with normal-sized pages (or other reasons) do not
         * attempt to allocate outside the allowed range.
         */
index 3ecdcec0a39e11403c792b66740c5a87648bb765..98f262de558526899fa5c58fe6802654bb0a5833 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/signal.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -384,8 +384,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
        printk(KERN_INFO "Huge page(16GB) memory: "
                        "addr = 0x%lX size = 0x%lX pages = %d\n",
                        phys_addr, block_size, expected_pages);
-       if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
-               lmb_reserve(phys_addr, block_size * expected_pages);
+       if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
+               memblock_reserve(phys_addr, block_size * expected_pages);
                add_gpage(phys_addr, block_size, expected_pages);
        }
        return 0;
@@ -458,7 +458,7 @@ static void __init htab_init_page_sizes(void)
         * and we have at least 1G of RAM at boot
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift &&
-           lmb_phys_mem_size() >= 0x40000000)
+           memblock_phys_mem_size() >= 0x40000000)
                mmu_vmemmap_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_64K].shift)
                mmu_vmemmap_psize = MMU_PAGE_64K;
@@ -520,7 +520,7 @@ static unsigned long __init htab_get_table_size(void)
                return 1UL << ppc64_pft_size;
 
        /* round mem_size up to next power of 2 */
-       mem_size = lmb_phys_mem_size();
+       mem_size = memblock_phys_mem_size();
        rnd_mem_size = 1UL << __ilog2(mem_size);
        if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;
@@ -627,7 +627,7 @@ static void __init htab_initialize(void)
                else
                        limit = 0;
 
-               table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+               table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);
@@ -647,9 +647,9 @@ static void __init htab_initialize(void)
        prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-       linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
-       linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
-                                                   1, lmb.rmo_size));
+       linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+       linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
+                                                   1, memblock.rmo_size));
        memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,16 +659,16 @@ static void __init htab_initialize(void)
         */
 
        /* create bolted the linear mapping in the hash table */
-       for (i=0; i < lmb.memory.cnt; i++) {
-               base = (unsigned long)__va(lmb.memory.region[i].base);
-               size = lmb.memory.region[i].size;
+       for (i=0; i < memblock.memory.cnt; i++) {
+               base = (unsigned long)__va(memblock.memory.region[i].base);
+               size = memblock.memory.region[i].size;
 
                DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
                    base, size, prot);
 
 #ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
-                * in such a way that it will not cross two lmb regions and
+                * in such a way that it will not cross two memblock regions and
                 * will fit within a single 16Mb page.
                 * The DART space is assumed to be a full 16Mb region even if
                 * we only use 2Mb of that space. We will use more of it later
index 767333005eb46e1d1075dbfe6674d5ef7eca3fbe..6a6975dc265427bf39c15979374c52501755aa3c 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/highmem.h>
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
@@ -136,17 +136,17 @@ void __init MMU_init(void)
        /* parse args from command line */
        MMU_setup();
 
-       if (lmb.memory.cnt > 1) {
+       if (memblock.memory.cnt > 1) {
 #ifndef CONFIG_WII
-               lmb.memory.cnt = 1;
-               lmb_analyze();
+               memblock.memory.cnt = 1;
+               memblock_analyze();
                printk(KERN_WARNING "Only using first contiguous memory region");
 #else
                wii_memory_fixups();
 #endif
        }
 
-       total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
+       total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr;
        lowmem_end_addr = memstart_addr + total_lowmem;
 
 #ifdef CONFIG_FSL_BOOKE
@@ -161,8 +161,8 @@ void __init MMU_init(void)
                lowmem_end_addr = memstart_addr + total_lowmem;
 #ifndef CONFIG_HIGHMEM
                total_memory = total_lowmem;
-               lmb_enforce_memory_limit(lowmem_end_addr);
-               lmb_analyze();
+               memblock_enforce_memory_limit(lowmem_end_addr);
+               memblock_analyze();
 #endif /* CONFIG_HIGHMEM */
        }
 
@@ -200,7 +200,7 @@ void __init *early_get_page(void)
        if (init_bootmem_done) {
                p = alloc_bootmem_pages(PAGE_SIZE);
        } else {
-               p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+               p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
                                        __initial_memory_limit_addr));
        }
        return p;
index e267f223fdffbb42f83511bb8da6f878b3382821..71f1415e2472399049a2273eded9c4fb8d173265 100644 (file)
@@ -40,7 +40,7 @@
 #include <linux/nodemask.h>
 #include <linux/module.h>
 #include <linux/poison.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 
index 0f594d774bf7c034975ab4f20bcdf7199981aba3..1a84a8d0000503b495ff2d79355f4f756e80591a 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
@@ -83,13 +83,13 @@ int page_is_ram(unsigned long pfn)
 #else
        unsigned long paddr = (pfn << PAGE_SHIFT);
        int i;
-       for (i=0; i < lmb.memory.cnt; i++) {
+       for (i=0; i < memblock.memory.cnt; i++) {
                unsigned long base;
 
-               base = lmb.memory.region[i].base;
+               base = memblock.memory.region[i].base;
 
                if ((paddr >= base) &&
-                       (paddr < (base + lmb.memory.region[i].size))) {
+                       (paddr < (base + memblock.memory.region[i].size))) {
                        return 1;
                }
        }
@@ -142,14 +142,14 @@ int arch_add_memory(int nid, u64 start, u64 size)
 /*
  * walk_memory_resource() needs to make sure there are no holes in a given
  * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
- * Instead it maintains it in lmb.memory structures.  Walk through the
+ * Instead it maintains it in memblock.memory structures.  Walk through the
  * memory regions, find holes and callback for contiguous regions.
  */
 int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-       struct lmb_property res;
+       struct memblock_property res;
        unsigned long pfn, len;
        u64 end;
        int ret = -1;
@@ -158,7 +158,7 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
        res.size = (u64) nr_pages << PAGE_SHIFT;
 
        end = res.base + res.size - 1;
-       while ((res.base < end) && (lmb_find(&res) >= 0)) {
+       while ((res.base < end) && (memblock_find(&res) >= 0)) {
                pfn = (unsigned long)(res.base >> PAGE_SHIFT);
                len = (unsigned long)(res.size >> PAGE_SHIFT);
                ret = (*func)(pfn, len, arg);
@@ -184,8 +184,8 @@ void __init do_init_bootmem(void)
        unsigned long total_pages;
        int boot_mapsize;
 
-       max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
-       total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
+       max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+       total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
@@ -198,16 +198,16 @@ void __init do_init_bootmem(void)
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);
 
-       start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+       start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
        /* Add active regions with valid PFNs */
-       for (i = 0; i < lmb.memory.cnt; i++) {
+       for (i = 0; i < memblock.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
-               start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-               end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+               start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+               end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }
 
@@ -218,17 +218,17 @@ void __init do_init_bootmem(void)
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
        /* reserve the sections we're already using */
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               unsigned long addr = lmb.reserved.region[i].base +
-                                    lmb_size_bytes(&lmb.reserved, i) - 1;
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               unsigned long addr = memblock.reserved.region[i].base +
+                                    memblock_size_bytes(&memblock.reserved, i) - 1;
                if (addr < lowmem_end_addr)
-                       reserve_bootmem(lmb.reserved.region[i].base,
-                                       lmb_size_bytes(&lmb.reserved, i),
+                       reserve_bootmem(memblock.reserved.region[i].base,
+                                       memblock_size_bytes(&memblock.reserved, i),
                                        BOOTMEM_DEFAULT);
-               else if (lmb.reserved.region[i].base < lowmem_end_addr) {
+               else if (memblock.reserved.region[i].base < lowmem_end_addr) {
                        unsigned long adjusted_size = lowmem_end_addr -
-                                     lmb.reserved.region[i].base;
-                       reserve_bootmem(lmb.reserved.region[i].base,
+                                     memblock.reserved.region[i].base;
+                       reserve_bootmem(memblock.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
@@ -236,9 +236,9 @@ void __init do_init_bootmem(void)
        free_bootmem_with_active_regions(0, max_pfn);
 
        /* reserve the sections we're already using */
-       for (i = 0; i < lmb.reserved.cnt; i++)
-               reserve_bootmem(lmb.reserved.region[i].base,
-                               lmb_size_bytes(&lmb.reserved, i),
+       for (i = 0; i < memblock.reserved.cnt; i++)
+               reserve_bootmem(memblock.reserved.region[i].base,
+                               memblock_size_bytes(&memblock.reserved, i),
                                BOOTMEM_DEFAULT);
 
 #endif
@@ -251,20 +251,20 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-       unsigned long lmb_next_region_start_pfn,
-                     lmb_region_max_pfn;
+       unsigned long memblock_next_region_start_pfn,
+                     memblock_region_max_pfn;
        int i;
 
-       for (i = 0; i < lmb.memory.cnt - 1; i++) {
-               lmb_region_max_pfn =
-                       (lmb.memory.region[i].base >> PAGE_SHIFT) +
-                       (lmb.memory.region[i].size >> PAGE_SHIFT);
-               lmb_next_region_start_pfn =
-                       lmb.memory.region[i+1].base >> PAGE_SHIFT;
+       for (i = 0; i < memblock.memory.cnt - 1; i++) {
+               memblock_region_max_pfn =
+                       (memblock.memory.region[i].base >> PAGE_SHIFT) +
+                       (memblock.memory.region[i].size >> PAGE_SHIFT);
+               memblock_next_region_start_pfn =
+                       memblock.memory.region[i+1].base >> PAGE_SHIFT;
 
-               if (lmb_region_max_pfn < lmb_next_region_start_pfn)
-                       register_nosave_region(lmb_region_max_pfn,
-                                              lmb_next_region_start_pfn);
+               if (memblock_region_max_pfn < memblock_next_region_start_pfn)
+                       register_nosave_region(memblock_region_max_pfn,
+                                              memblock_next_region_start_pfn);
        }
 
        return 0;
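
mark_nonram_nosave() relies on memblock.memory being sorted and non-overlapping, so a hole is simply the gap between one region's end and the next region's start. The same scan in standalone form (hypothetical types; the kernel version hands each gap to register_nosave_region()):

    #include <stdio.h>

    struct region { unsigned long start_pfn, end_pfn; };

    /* Report gaps between consecutive sorted, non-overlapping regions. */
    static void find_holes(const struct region *r, int cnt)
    {
            int i;

            for (i = 0; i < cnt - 1; i++)
                    if (r[i].end_pfn < r[i + 1].start_pfn)
                            printf("hole: pfn %lu..%lu\n",
                                   r[i].end_pfn, r[i + 1].start_pfn);
    }

    int main(void)
    {
            struct region mem[] = { { 0, 256 }, { 512, 1024 } };

            find_holes(mem, 2);     /* prints: hole: pfn 256..512 */
            return 0;
    }
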
@@ -275,8 +275,8 @@ static int __init mark_nonram_nosave(void)
  */
 void __init paging_init(void)
 {
-       unsigned long total_ram = lmb_phys_mem_size();
-       phys_addr_t top_of_ram = lmb_end_of_DRAM();
+       unsigned long total_ram = memblock_phys_mem_size();
+       phys_addr_t top_of_ram = memblock_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 #ifdef CONFIG_PPC32
@@ -327,7 +327,7 @@ void __init mem_init(void)
                swiotlb_init(1);
 #endif
 
-       num_physpages = lmb.memory.size >> PAGE_SHIFT;
+       num_physpages = memblock.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -364,7 +364,7 @@ void __init mem_init(void)
                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
-                       if (lmb_is_reserved(pfn << PAGE_SHIFT))
+                       if (memblock_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
index 80d110635d24a106d7c6f0ee204438abaf56025e..f47364585ecd0bce3df61a5397fa3f9d0ccf7feb 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
 #include <asm/sparsemem.h>
@@ -351,7 +351,7 @@ struct of_drconf_cell {
 #define DRCONF_MEM_RESERVED    0x00000080
 
 /*
- * Read the next lmb list entry from the ibm,dynamic-memory property
+ * Read the next memblock list entry from the ibm,dynamic-memory property
  * and return the information in the provided of_drconf_cell structure.
  */
 static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
@@ -372,8 +372,8 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 /*
  * Retrieve and validate the ibm,dynamic-memory property of the device tree.
  *
- * The layout of the ibm,dynamic-memory property is a number N of lmb
- * list entries followed by N lmb list entries.  Each lmb list entry
+ * The layout of the ibm,dynamic-memory property is a number N of memblock
+ * list entries followed by N memblock list entries.  Each memblock list entry
  * contains information as laid out in the of_drconf_cell struct above.
  */
 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
@@ -398,15 +398,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }
 
 /*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retrieve and validate the ibm,memblock-size property for drconf memory
  * from the device tree.
  */
-static u64 of_get_lmb_size(struct device_node *memory)
+static u64 of_get_memblock_size(struct device_node *memory)
 {
        const u32 *prop;
        u32 len;
 
-       prop = of_get_property(memory, "ibm,lmb-size", &len);
+       prop = of_get_property(memory, "ibm,memblock-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;
 
@@ -540,19 +540,19 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
 {
        /*
-        * We use lmb_end_of_DRAM() in here instead of memory_limit because
+        * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.  Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */
 
-       if (start + size <= lmb_end_of_DRAM())
+       if (start + size <= memblock_end_of_DRAM())
                return size;
 
-       if (start >= lmb_end_of_DRAM())
+       if (start >= memblock_end_of_DRAM())
                return 0;
 
-       return lmb_end_of_DRAM() - start;
+       return memblock_end_of_DRAM() - start;
 }
 
 /*
@@ -562,7 +562,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 static inline int __init read_usm_ranges(const u32 **usm)
 {
        /*
-        * For each lmb in ibm,dynamic-memory a corresponding
+        * For each memblock in ibm,dynamic-memory a corresponding
         * entry in linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) tuples.
         * Read the counter from linux,drconf-usable-memory
@@ -578,7 +578,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 {
        const u32 *dm, *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
-       unsigned long lmb_size, base, size, sz;
+       unsigned long memblock_size, base, size, sz;
        int nid;
        struct assoc_arrays aa;
 
@@ -586,8 +586,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
        if (!n)
                return;
 
-       lmb_size = of_get_lmb_size(memory);
-       if (!lmb_size)
+       memblock_size = of_get_memblock_size(memory);
+       if (!memblock_size)
                return;
 
        rc = of_get_assoc_arrays(memory, &aa);
@@ -611,7 +611,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
                        continue;
 
                base = drmem.base_addr;
-               size = lmb_size;
+               size = memblock_size;
                ranges = 1;
 
                if (is_kexec_kdump) {
@@ -731,7 +731,7 @@ new_range:
        }
 
        /*
-        * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+        * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
         * property in the ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -743,8 +743,8 @@ new_range:
 
 static void __init setup_nonnuma(void)
 {
-       unsigned long top_of_ram = lmb_end_of_DRAM();
-       unsigned long total_ram = lmb_phys_mem_size();
+       unsigned long top_of_ram = memblock_end_of_DRAM();
+       unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int i, nid = 0;
 
@@ -753,9 +753,9 @@ static void __init setup_nonnuma(void)
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
 
-       for (i = 0; i < lmb.memory.cnt; ++i) {
-               start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-               end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+       for (i = 0; i < memblock.memory.cnt; ++i) {
+               start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+               end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 
                fake_numa_create_new_node(end_pfn, &nid);
                add_active_range(nid, start_pfn, end_pfn);
@@ -813,7 +813,7 @@ static void __init dump_numa_memory_topology(void)
 
                count = 0;
 
-               for (i = 0; i < lmb_end_of_DRAM();
+               for (i = 0; i < memblock_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
@@ -833,7 +833,7 @@ static void __init dump_numa_memory_topology(void)
 }
 
 /*
- * Allocate some memory, satisfying the lmb or bootmem allocator where
+ * Allocate some memory, using the memblock or bootmem allocator as
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
  *
@@ -847,11 +847,11 @@ static void __init *careful_zallocation(int nid, unsigned long size,
        int new_nid;
        unsigned long ret_paddr;
 
-       ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+       ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
        /* retry over all memory */
        if (!ret_paddr)
-               ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+               ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
 
        if (!ret_paddr)
                panic("numa.c: cannot allocate %lu bytes for node %d",
@@ -861,14 +861,14 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 
        /*
         * We initialize the nodes in numeric order: 0, 1, 2...
-        * and hand over control from the LMB allocator to the
+        * and hand over control from the MEMBLOCK allocator to the
         * bootmem allocator.  If this function is called for
         * node 5, then we know that all nodes <5 are using the
-        * bootmem allocator instead of the LMB allocator.
+        * bootmem allocator instead of the MEMBLOCK allocator.
         *
         * So, check the nid from which this allocation came
         * and double check to see if we need to use bootmem
-        * instead of the LMB.  We don't free the LMB memory
+        * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
         * since it would be useless.
         */
        new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
@@ -893,9 +893,9 @@ static void mark_reserved_regions_for_nid(int nid)
        struct pglist_data *node = NODE_DATA(nid);
        int i;
 
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               unsigned long physbase = lmb.reserved.region[i].base;
-               unsigned long size = lmb.reserved.region[i].size;
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               unsigned long physbase = memblock.reserved.region[i].base;
+               unsigned long size = memblock.reserved.region[i].size;
                unsigned long start_pfn = physbase >> PAGE_SHIFT;
                unsigned long end_pfn = PFN_UP(physbase + size);
                struct node_active_region node_ar;
@@ -903,7 +903,7 @@ static void mark_reserved_regions_for_nid(int nid)
                                             node->node_spanned_pages;
 
                /*
-                * Check to make sure that this lmb.reserved area is
+                * Check to make sure that this memblock.reserved area is
                 * within the bounds of the node that we care about.
                 * Checking the nid of the start and end points is not
                 * sufficient because the reserved area could span the
@@ -961,7 +961,7 @@ void __init do_init_bootmem(void)
        int nid;
 
        min_low_pfn = 0;
-       max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;
 
        if (parse_numa_properties())
@@ -1038,7 +1038,7 @@ void __init paging_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-       max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
 }
 
@@ -1072,7 +1072,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 {
        const u32 *dm;
        unsigned int drconf_cell_cnt, rc;
-       unsigned long lmb_size;
+       unsigned long memblock_size;
        struct assoc_arrays aa;
        int nid = -1;
 
@@ -1080,8 +1080,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
        if (!drconf_cell_cnt)
                return -1;
 
-       lmb_size = of_get_lmb_size(memory);
-       if (!lmb_size)
+       memblock_size = of_get_memblock_size(memory);
+       if (!memblock_size)
                return -1;
 
        rc = of_get_assoc_arrays(memory, &aa);
@@ -1100,7 +1100,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                        continue;
 
                if ((scn_addr < drmem.base_addr)
-                   || (scn_addr >= (drmem.base_addr + lmb_size)))
+                   || (scn_addr >= (drmem.base_addr + memblock_size)))
                        continue;
 
                nid = of_drconf_to_nid_single(&drmem, &aa);
@@ -1113,7 +1113,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 /*
  * Find the node associated with a hot added memory section for memory
  * represented in the device tree as a node (i.e. memory@XXXX) for
- * each lmb.
+ * each memblock.
  */
 int hot_add_node_scn_to_nid(unsigned long scn_addr)
 {
@@ -1154,8 +1154,8 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
 
 /*
  * Find the node associated with a hot added memory section.  Section
- * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
- * sections are fully contained within a single LMB.
+ * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
+ * sections are fully contained within a single MEMBLOCK.
  */
 int hot_add_scn_to_nid(unsigned long scn_addr)
 {
index 34347b2e7e313995c803d34c847034dff7a9c1cf..a87ead0138b45b3203128b81718be17c4a7158bf 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgtable.h>
@@ -198,7 +198,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
         * mem_init() sets high_memory so only do the check after that.
         */
        if (mem_init_done && (p < virt_to_phys(high_memory)) &&
-           !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
+           !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
                printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
                       (unsigned long long)p, __builtin_return_address(0));
                return NULL;
@@ -331,7 +331,7 @@ void __init mapin_ram(void)
                s = mmu_mapin_ram(top);
                __mapin_ram_chunk(s, top);
 
-               top = lmb_end_of_DRAM();
+               top = memblock_end_of_DRAM();
                s = wii_mmu_mapin_mem2(top);
                __mapin_ram_chunk(s, top);
        }
index d050fc8d9714830ce3cea027465f794864a8a830..21d6dfab7942f73e345a22c418ec4a5ee526edc6 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgalloc.h>
@@ -67,7 +67,7 @@ static void *early_alloc_pgtable(unsigned long size)
        if (init_bootmem_done)
                pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
        else
-               pt = __va(lmb_alloc_base(size, size,
+               pt = __va(memblock_alloc_base(size, size,
                                         __pa(MAX_DMA_ADDRESS)));
        memset(pt, 0, size);
 
index f11c2cdcb0fedd9a1afb89222ed4e6f6a9037116..f8a01829d64fd4820921f3148a17c32b98e74b67 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/mmu.h>
@@ -223,7 +223,7 @@ void __init MMU_init_hw(void)
         * Find some memory for the hash table.
         */
        if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-       Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
+       Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
                                   __initial_memory_limit_addr));
        cacheable_memzero(Hash, Hash_size);
        _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
index 687fddaa24c564fb19491b146525ba2ffc58ac67..446a01842a73eff71046e558bd71ea37677ce254 100644 (file)
@@ -12,7 +12,7 @@
  *      2 of the License, or (at your option) any later version.
  */
 
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -252,7 +252,7 @@ void __init stabs_alloc(void)
                if (cpu == 0)
                        continue; /* stab for CPU 0 is statically allocated */
 
-               newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
+               newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
                                         1<<SID_SHIFT);
                newstab = (unsigned long)__va(newstab);
 
index e81d5d67f834021c9304e9c35ab2030864a027a9..d8695b02a96895fbad4c002224d0e4c25c316aa0 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/pagemap.h>
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
@@ -426,7 +426,7 @@ static void __early_init_mmu(int boot_cpu)
        /* Set the global containing the top of the linear mapping
         * for use by the TLB miss code
         */
-       linear_map_top = lmb_end_of_DRAM();
+       linear_map_top = memblock_end_of_DRAM();
 
        /* A sync won't hurt us after mucking around with
         * the MMU configuration
index 534c2ecc89d904dfeff5448009f24405624522d4..2ab338c9ac37fb7cfdd6ba4554877e17ec0552c5 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/kdev_t.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/time.h>
@@ -100,7 +100,7 @@ void __init corenet_ds_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 004b7d36cdb7512e33b8b774b25057d0b6230e2c..f79f2f1021416c5a4e1f6a97ebe4cab9ff02d57b 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/time.h>
@@ -94,7 +94,7 @@ static void __init mpc8536_ds_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 544011a562fb283c64b864b295fe17ff046d4f40..8190bc25bf27327e762e798e0902cf83c20c3c8d 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/time.h>
@@ -190,7 +190,7 @@ static void __init mpc85xx_ds_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 8fe87fc61485bdc1c48c4bf430ed626933ce8b22..494513682d708d3356ea376339be8bf577f2333a 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_device.h>
 #include <linux/phy.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/atomic.h>
@@ -325,7 +325,7 @@ static void __init mpc85xx_mds_setup_arch(void)
 #endif /* CONFIG_QUICC_ENGINE */
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 2aa69a69bcc8b3147e7c16b82b5a74ea35777316..b11c3535f3509cac28e2baf7fd5e999001ab0709 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/system.h>
 #include <asm/time.h>
@@ -103,7 +103,7 @@ mpc86xx_hpcn_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 4326b737d913e02d4541320c5bb3cecc45f7220b..3712900471ba2e9033419ab31d620ab8d2a983b2 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -845,10 +845,10 @@ static int __init cell_iommu_init_disabled(void)
        /* If we found a DMA window, we check if it's big enough to enclose
         * all of physical memory. If not, we force enable IOMMU
         */
-       if (np && size < lmb_end_of_DRAM()) {
+       if (np && size < memblock_end_of_DRAM()) {
                printk(KERN_WARNING "iommu: force-enabled, dma window"
                       " (%ldMB) smaller than total memory (%lldMB)\n",
-                      size >> 20, lmb_end_of_DRAM() >> 20);
+                      size >> 20, memblock_end_of_DRAM() >> 20);
                return -ENODEV;
        }
 
@@ -1064,7 +1064,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
        }
 
        fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
-       fsize = lmb_phys_mem_size();
+       fsize = memblock_phys_mem_size();
 
        if ((fbase + fsize) <= 0x800000000ul)
                hbase = 0; /* use the device tree window */
@@ -1169,7 +1169,7 @@ static int __init cell_iommu_init(void)
         * Note: should we make sure we have the IOMMU actually disabled ?
         */
        if (iommu_is_off ||
-           (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
+           (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
                if (cell_iommu_init_disabled() == 0)
                        goto bail;
 
index 174a04ac4806e714e49e8895f0f4180363d838b7..5cdcc7c8d9738416a8055840235ca5954d33abd2 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/seq_file.h>
 #include <linux/kexec.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <mm/mmu_decl.h>
 
 #include <asm/io.h>
@@ -65,7 +65,7 @@ static int __init page_aligned(unsigned long x)
 
 void __init wii_memory_fixups(void)
 {
-       struct lmb_property *p = lmb.memory.region;
+       struct memblock_property *p = memblock.memory.region;
 
        /*
         * This is part of a workaround to allow the use of two
@@ -77,7 +77,7 @@ void __init wii_memory_fixups(void)
         * between both ranges.
         */
 
-       BUG_ON(lmb.memory.cnt != 2);
+       BUG_ON(memblock.memory.cnt != 2);
        BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
 
        p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
@@ -92,11 +92,11 @@ void __init wii_memory_fixups(void)
 
        p[0].size += wii_hole_size + p[1].size;
 
-       lmb.memory.cnt = 1;
-       lmb_analyze();
+       memblock.memory.cnt = 1;
+       memblock_analyze();
 
        /* reserve the hole */
-       lmb_reserve(wii_hole_start, wii_hole_size);
+       memblock_reserve(wii_hole_start, wii_hole_size);
 
        /* allow ioremapping the address space in the hole */
        __allow_ioremap_reserved = 1;
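
Schematically, the Wii fixup folds the console's two discontiguous RAM ranges into one memblock region and then reserves the gap, keeping the hole away from the page allocator while still allowing it to be ioremapped:

    /* before:  [ MEM1 ][  hole  ][ MEM2 ]    memblock.memory.cnt == 2
     * after:   [ MEM1    hole     MEM2  ]    memblock.memory.cnt == 1
     *                    \__ memblock_reserve(wii_hole_start, wii_hole_size)
     */
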
index 39df70529d292d594a0f52fad57b5e19425a78c9..3fff8d979b41a809743ca28e4234fcaa28e81400 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/smp.h>
 #include <linux/bitops.h>
 #include <linux/of_device.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
index 7b1d608ea3c828c5fe195e77a2e1aa2f8a70e1e2..1f9fb2c577613c64335f4c61b348efc6701de3a2 100644 (file)
@@ -204,7 +204,7 @@ int __init iob_init(struct device_node *dn)
        pr_debug(" -> %s\n", __func__);
 
        /* Allocate a spare page to map all invalid IOTLB pages. */
-       tmp = lmb_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
+       tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
        if (!tmp)
                panic("IOBMAP: Cannot allocate spare page!");
        /* Empty l1 is marked invalid */
@@ -275,7 +275,7 @@ void __init alloc_iobmap_l2(void)
        return;
 #endif
        /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
-       iob_l2_base = (u32 *)abs_to_virt(lmb_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
+       iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
 
        printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
 }
index f1d0132ebcc770f0d7fe8e05ebe08ff5b22e449d..9deb274841f1198d52eb6b2cc4029a374cd1c7fc 100644 (file)
@@ -51,7 +51,7 @@
 #include <linux/suspend.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/reg.h>
 #include <asm/sections.h>
@@ -619,7 +619,7 @@ static int __init pmac_probe(void)
         * driver needs that. We have to allocate it now. We allocate 4k
         * (1 small page) for now.
         */
-       smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
+       smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
 #endif /* CONFIG_PMAC_SMU */
 
        return 1;
index 1e8a1e39dfe816abc4e97cd1c33f12463edaf267..2c0ed87f20244c88e4feea8c79aca1a29c4115a0 100644 (file)
@@ -19,7 +19,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/machdep.h>
 #include <asm/prom.h>
index 7925751e464acd2941ce5dce7caa037fc210a755..c2045880e674afc91a5295711042f4722c6aa510 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/memory_hotplug.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/cell-regs.h>
@@ -318,8 +318,8 @@ static int __init ps3_mm_add_memory(void)
                return result;
        }
 
-       lmb_add(start_addr, map.r1.size);
-       lmb_analyze();
+       memblock_add(start_addr, map.r1.size);
+       memblock_analyze();
 
        result = online_pages(start_pfn, nr_pages);
 
index dd521a181f23bdb5237607e65fabbe7df6d2988a..5b759b66959833d60d1fe0f3cc524d0a3b023e2a 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/fs.h>
 #include <linux/syscalls.h>
 #include <linux/ctype.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 
@@ -723,7 +723,7 @@ static void os_area_queue_work(void)
  * flash to a high address in the boot memory region and then puts that RAM
  * address and the byte count into the repository for retrieval by the guest.
  * We copy the data we want into a static variable and allow the memory setup
- * by the HV to be claimed by the lmb manager.
+ * by the HV to be claimed by the memblock manager.
  *
  * The os area mirror will not be available to a second stage kernel, and
  * the header verification will fail.  In this case, the saved_params values will
index 01e7b5bb3c1d3853469694bf00235555108321f6..deab5f9460907891a3134a1d7f6bcdb459796dcf 100644 (file)
  */
 
 #include <linux/of.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/vmalloc.h>
 #include <asm/firmware.h>
 #include <asm/machdep.h>
 #include <asm/pSeries_reconfig.h>
 #include <asm/sparsemem.h>
 
-static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
+static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
 {
        unsigned long start, start_pfn;
        struct zone *zone;
@@ -26,7 +26,7 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
        start_pfn = base >> PAGE_SHIFT;
 
        if (!pfn_valid(start_pfn)) {
-               lmb_remove(base, lmb_size);
+               memblock_remove(base, memblock_size);
                return 0;
        }
 
@@ -41,20 +41,20 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
         * to sysfs "state" file and we can't remove sysfs entries
         * while writing to it. So we have to defer it to here.
         */
-       ret = __remove_pages(zone, start_pfn, lmb_size >> PAGE_SHIFT);
+       ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
        if (ret)
                return ret;
 
        /*
         * Update memory regions for memory remove
         */
-       lmb_remove(base, lmb_size);
+       memblock_remove(base, memblock_size);
 
        /*
         * Remove htab bolted mappings for this section of memory
         */
        start = (unsigned long)__va(base);
-       ret = remove_section_mapping(start, start + lmb_size);
+       ret = remove_section_mapping(start, start + memblock_size);
 
        /* Ensure all vmalloc mappings are flushed in case they also
         * hit that section of memory
@@ -69,7 +69,7 @@ static int pseries_remove_memory(struct device_node *np)
        const char *type;
        const unsigned int *regs;
        unsigned long base;
-       unsigned int lmb_size;
+       unsigned int memblock_size;
        int ret = -EINVAL;
 
        /*
@@ -80,16 +80,16 @@ static int pseries_remove_memory(struct device_node *np)
                return 0;
 
        /*
-        * Find the bae address and size of the lmb
+        * Find the base address and size of the memblock
         */
        regs = of_get_property(np, "reg", NULL);
        if (!regs)
                return ret;
 
        base = *(unsigned long *)regs;
-       lmb_size = regs[3];
+       memblock_size = regs[3];
 
-       ret = pseries_remove_lmb(base, lmb_size);
+       ret = pseries_remove_memblock(base, memblock_size);
        return ret;
 }
 
@@ -98,7 +98,7 @@ static int pseries_add_memory(struct device_node *np)
        const char *type;
        const unsigned int *regs;
        unsigned long base;
-       unsigned int lmb_size;
+       unsigned int memblock_size;
        int ret = -EINVAL;
 
        /*
@@ -109,43 +109,43 @@ static int pseries_add_memory(struct device_node *np)
                return 0;
 
        /*
-        * Find the base and size of the lmb
+        * Find the base and size of the memblock
         */
        regs = of_get_property(np, "reg", NULL);
        if (!regs)
                return ret;
 
        base = *(unsigned long *)regs;
-       lmb_size = regs[3];
+       memblock_size = regs[3];
 
        /*
         * Update memory region to represent the memory add
         */
-       ret = lmb_add(base, lmb_size);
+       ret = memblock_add(base, memblock_size);
        return (ret < 0) ? -EINVAL : 0;
 }
 
 static int pseries_drconf_memory(unsigned long *base, unsigned int action)
 {
        struct device_node *np;
-       const unsigned long *lmb_size;
+       const unsigned long *memblock_size;
        int rc;
 
        np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!np)
                return -EINVAL;
 
-       lmb_size = of_get_property(np, "ibm,lmb-size", NULL);
-       if (!lmb_size) {
+       memblock_size = of_get_property(np, "ibm,memblock-size", NULL);
+       if (!memblock_size) {
                of_node_put(np);
                return -EINVAL;
        }
 
        if (action == PSERIES_DRCONF_MEM_ADD) {
-               rc = lmb_add(*base, *lmb_size);
+               rc = memblock_add(*base, *memblock_size);
                rc = (rc < 0) ? -EINVAL : 0;
        } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
-               rc = pseries_remove_lmb(*base, *lmb_size);
+               rc = pseries_remove_memblock(*base, *memblock_size);
        } else {
                rc = -EINVAL;
        }
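
Both the add and remove paths above decode the affected region from the node's "reg" property the same way. Assuming the usual 64-bit pSeries layout of two address cells followed by two size cells (the patch does not spell this out), the first two cells are read as the 64-bit base and only the low size cell is kept:

        const unsigned int *regs = of_get_property(np, "reg", NULL);
        unsigned long base;
        unsigned int size;

        if (!regs)
                return -EINVAL;

        base = *(unsigned long *)regs;  /* cells 0-1: 64-bit base address  */
        size = regs[3];                 /* cell 3: low 32 bits of the size */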
index d26182d42cbfb5a712fb6803dfdf056f560e0c33..395848e30c523b36f7b31bbd258610777e87b4dd 100644 (file)
@@ -66,7 +66,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
        tcep = ((u64 *)tbl->it_base) + index;
 
        while (npages--) {
-               /* can't move this out since we might cross LMB boundary */
+               /* can't move this out since we might cross MEMBLOCK boundary */
                rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
                *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
 
index 7ebd9e88d369c242b63dd48b0bd7a02a95f94449..6e7742da0072a521be908d6ded562a3af89b2db0 100644 (file)
@@ -255,12 +255,12 @@ void invalidate_last_dump(struct phyp_dump_header *ph, unsigned long addr)
 
 /* ------------------------------------------------- */
 /**
- * release_memory_range -- release memory previously lmb_reserved
+ * release_memory_range -- release memory previously memblock_reserved
  * @start_pfn: starting physical frame number
  * @nr_pages: number of pages to free.
  *
  * This routine will release memory that had been previously
- * lmb_reserved in early boot. The released memory becomes
+ * memblock_reserved in early boot. The released memory becomes
  * available for general use.
  */
 static void release_memory_range(unsigned long start_pfn,
index c8b96ed7c0158a81137777b050fbfd745d2dab0a..559db2b846a9dcd4428d4feb0467ff14bdd4cdbc 100644 (file)
@@ -36,7 +36,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -232,7 +232,7 @@ static int __init dart_init(struct device_node *dart_node)
         * that to work around what looks like a problem with the HT bridge
         * prefetching into invalid pages and corrupting data
         */
-       tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
+       tmp = memblock_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
        dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
                                         DARTMAP_RPNMASK);
 
@@ -407,7 +407,7 @@ void __init alloc_dart_table(void)
        if (iommu_is_off)
                return;
 
-       if (!iommu_force_on && lmb_end_of_DRAM() <= 0x40000000ull)
+       if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
                return;
 
        /* 512 pages (2MB) is max DART tablesize. */
@@ -416,7 +416,7 @@ void __init alloc_dart_table(void)
         * will blow up an entire large page anyway in the kernel mapping
         */
        dart_tablebase = (unsigned long)
-               abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
+               abs_to_virt(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
 
        printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
 }
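
memblock_alloc_base() takes an upper physical bound in addition to the usual size and alignment, which is what pins the DART table below 2GB in the hunk above: 16MB (1UL << 24), aligned to 16MB, allocated beneath 0x80000000. A minimal sketch of the same pattern, values taken from the call above:

        /* 16MB table, 16MB-aligned, constrained below 2GB */
        u64 table = memblock_alloc_base(1UL << 24,     /* size      */
                                        1UL << 24,     /* alignment */
                                        0x80000000UL); /* max_addr  */
        dart_tablebase = (unsigned long)abs_to_virt(table);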
index a14760fe513a6bc7d5bcbc9780470b1b64e84516..356c6a0e1b2367768e17354f57b6369b604e32bb 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/log2.h>
 #include <linux/slab.h>
 
@@ -190,7 +190,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
        pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar);
 
        /* Setup inbound mem window */
-       mem = lmb_end_of_DRAM();
+       mem = memblock_end_of_DRAM();
        sz = min(mem, paddr_lo);
        mem_log = __ilog2_u64(sz);
 
index d8d602840757cad9301fd5a2a38f2ed7ee6dec6b..c0bb76ef724206061551938dbddd1749b89fa814 100644 (file)
@@ -4,6 +4,7 @@
  * also relocates SMC2, but this would require additional changes
  * to uart.c, so I am holding off on that for a moment.
  */
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -16,6 +17,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/8xx_immap.h>
+#include <asm/cpm.h>
 #include <asm/cpm1.h>
 
 /*
@@ -24,7 +26,7 @@
 
 #ifdef CONFIG_I2C_SPI_UCODE_PATCH
 
-uint patch_2000[] = {
+static uint patch_2000[] __initdata = {
        0x7FFFEFD9,
        0x3FFD0000,
        0x7FFB49F7,
@@ -143,7 +145,7 @@ uint patch_2000[] = {
        0x5F8247F8
 };
 
-uint patch_2f00[] = {
+static uint patch_2f00[] __initdata = {
        0x3E303430,
        0x34343737,
        0xABF7BF9B,
@@ -182,7 +184,7 @@ uint patch_2f00[] = {
 
 #ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
 
-uint patch_2000[] = {
+static uint patch_2000[] __initdata = {
        0x3fff0000,
        0x3ffd0000,
        0x3ffb0000,
@@ -505,7 +507,7 @@ uint patch_2000[] = {
        0x6079e2bb
 };
 
-uint patch_2f00[] = {
+static uint patch_2f00[] __initdata = {
        0x30303030,
        0x3e3e3434,
        0xabbf9b99,
@@ -572,7 +574,7 @@ uint patch_2f00[] = {
        0xf22f3f23
 };
 
-uint patch_2e00[] = {
+static uint patch_2e00[] __initdata = {
        0x27eeeeee,
        0xeeeeeeee,
        0xeeeeeeee,
@@ -598,7 +600,7 @@ uint patch_2e00[] = {
 
 #ifdef CONFIG_USB_SOF_UCODE_PATCH
 
-uint patch_2000[] = {
+static uint patch_2000[] __initdata = {
        0x7fff0000,
        0x7ffd0000,
        0x7ffb0000,
@@ -613,21 +615,25 @@ uint patch_2000[] = {
        0x60750000
 };
 
-uint patch_2f00[] = {
+static uint patch_2f00[] __initdata = {
        0x3030304c,
        0xcab9e441,
        0xa1aaf220
 };
 #endif
 
-void
-cpm_load_patch(cpm8xx_t        *cp)
+void __init cpm_load_patch(cpm8xx_t *cp)
 {
        volatile uint           *dp;            /* Dual-ported RAM. */
        volatile cpm8xx_t       *commproc;
+#if defined(CONFIG_I2C_SPI_UCODE_PATCH) || \
+    defined(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
        volatile iic_t          *iip;
-       volatile spi_t          *spp;
+       volatile struct spi_pram *spp;
+#ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
        volatile smc_uart_t     *smp;
+#endif
+#endif
        int     i;
 
        commproc = cp;
@@ -668,8 +674,8 @@ cpm_load_patch(cpm8xx_t     *cp)
        /* Put SPI above the IIC, also 32-byte aligned.
        */
        i = (RPBASE + sizeof(iic_t) + 31) & ~31;
-       spp = (spi_t *)&commproc->cp_dparam[PROFF_SPI];
-       spp->spi_rpbase = i;
+       spp = (struct spi_pram *)&commproc->cp_dparam[PROFF_SPI];
+       spp->rpbase = i;
 
 # if defined(CONFIG_I2C_SPI_UCODE_PATCH)
        commproc->cp_cpmcr1 = 0x802a;
index 573fca1fbd9b699d7beff90a4d162af8a01652cd..82868fee21fdbed9f670edde147032253f82f459 100644 (file)
@@ -10,7 +10,7 @@ config SUPERH
        select EMBEDDED
        select HAVE_CLK
        select HAVE_IDE if HAS_IOPORT
-       select HAVE_LMB
+       select HAVE_MEMBLOCK
        select HAVE_OPROFILE
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_ARCH_TRACEHOOK
diff --git a/arch/sh/include/asm/lmb.h b/arch/sh/include/asm/lmb.h
deleted file mode 100644 (file)
index 9b437f6..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_LMB_H
-#define __ASM_SH_LMB_H
-
-#define LMB_REAL_LIMIT 0
-
-#endif /* __ASM_SH_LMB_H */
diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h
new file mode 100644 (file)
index 0000000..dfe683b
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH_MEMBLOCK_H
+#define __ASM_SH_MEMBLOCK_H
+
+#define MEMBLOCK_REAL_LIMIT    0
+
+#endif /* __ASM_SH_MEMBLOCK_H */
index 5a559e666eb352ec2da5418a5fcbd969babbf6a1..e2a3af31ff9930b534c5b1ee7e5bce9721905ba1 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/numa.h>
 #include <linux/ftrace.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
@@ -157,10 +157,10 @@ void __init reserve_crashkernel(void)
        unsigned long long crash_size, crash_base;
        int ret;
 
-       /* this is necessary because of lmb_phys_mem_size() */
-       lmb_analyze();
+       /* this is necessary because of memblock_phys_mem_size() */
+       memblock_analyze();
 
-       ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+       ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                        &crash_size, &crash_base);
        if (ret == 0 && crash_size > 0) {
                crashk_res.start = crash_base;
@@ -172,14 +172,14 @@ void __init reserve_crashkernel(void)
 
        crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1);
        if (!crashk_res.start) {
-               unsigned long max = lmb_end_of_DRAM() - memory_limit;
-               crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max);
+               unsigned long max = memblock_end_of_DRAM() - memory_limit;
+               crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
                if (!crashk_res.start) {
                        pr_err("crashkernel allocation failed\n");
                        goto disable;
                }
        } else {
-               ret = lmb_reserve(crashk_res.start, crash_size);
+               ret = memblock_reserve(crashk_res.start, crash_size);
                if (unlikely(ret < 0)) {
                        pr_err("crashkernel reservation failed - "
                               "memory is in use\n");
@@ -192,7 +192,7 @@ void __init reserve_crashkernel(void)
        /*
         * Crash kernel trumps memory limit
         */
-       if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) {
+       if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
                memory_limit = 0;
                pr_info("Disabled memory limit for crashkernel\n");
        }
@@ -201,7 +201,7 @@ void __init reserve_crashkernel(void)
                "for crashkernel (System RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crashk_res.start),
-               (unsigned long)(lmb_phys_mem_size() >> 20));
+               (unsigned long)(memblock_phys_mem_size() >> 20));
 
        return;
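
Condensed, the reservation above takes one of two paths: with no fixed base on the command line it lets memblock place the region below max (end of DRAM minus any memory limit); with a fixed base it reserves exactly that range and fails if the memory is already in use. In sketch form:

        if (!crashk_res.start)
                /* no base requested: memblock chooses, bounded above */
                crashk_res.start = __memblock_alloc_base(crash_size,
                                                         PAGE_SIZE, max);
        else
                /* fixed base requested: reserve it or report the clash */
                ret = memblock_reserve(crashk_res.start, crash_size);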
 
index 272734681d29dd6262772a467735a04cd91ab88a..e769401a78ba3218946620d62ad2a83875448ea7 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/page.h>
@@ -141,10 +141,10 @@ void __init check_for_initrd(void)
                goto disable;
        }
 
-       if (unlikely(end > lmb_end_of_DRAM())) {
+       if (unlikely(end > memblock_end_of_DRAM())) {
                pr_err("initrd extends beyond end of memory "
                       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-                      end, (unsigned long)lmb_end_of_DRAM());
+                      end, (unsigned long)memblock_end_of_DRAM());
                goto disable;
        }
 
@@ -161,7 +161,7 @@ void __init check_for_initrd(void)
        initrd_start = (unsigned long)__va(__pa(start));
        initrd_end = initrd_start + INITRD_SIZE;
 
-       lmb_reserve(__pa(initrd_start), INITRD_SIZE);
+       memblock_reserve(__pa(initrd_start), INITRD_SIZE);
 
        return;
 
index 46f84de6246901e5704942d2fa8bea3ae4d9b87f..d0e249100e98d044b804414b80d725a0ebd7e43d 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/pagemap.h>
 #include <linux/percpu.h>
 #include <linux/io.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
 #include <asm/mmzone.h>
@@ -33,7 +33,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 void __init generic_mem_init(void)
 {
-       lmb_add(__MEMORY_START, __MEMORY_SIZE);
+       memblock_add(__MEMORY_START, __MEMORY_SIZE);
 }
 
 void __init __weak plat_mem_setup(void)
@@ -176,12 +176,12 @@ void __init allocate_pgdat(unsigned int nid)
        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-       phys = __lmb_alloc_base(sizeof(struct pglist_data),
+       phys = __memblock_alloc_base(sizeof(struct pglist_data),
                                SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
        /* Retry with all of system memory */
        if (!phys)
-               phys = __lmb_alloc_base(sizeof(struct pglist_data),
-                                       SMP_CACHE_BYTES, lmb_end_of_DRAM());
+               phys = __memblock_alloc_base(sizeof(struct pglist_data),
+                                       SMP_CACHE_BYTES, memblock_end_of_DRAM());
        if (!phys)
                panic("Can't allocate pgdat for node %d\n", nid);
 
@@ -212,7 +212,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
 
        total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
-       paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+       paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
        if (!paddr)
                panic("Can't allocate bootmap for nid[%d]\n", nid);
 
@@ -227,9 +227,9 @@ static void __init bootmem_init_one_node(unsigned int nid)
         */
        if (nid == 0) {
                /* Reserve the sections we're already using. */
-               for (i = 0; i < lmb.reserved.cnt; i++)
-                       reserve_bootmem(lmb.reserved.region[i].base,
-                                       lmb_size_bytes(&lmb.reserved, i),
+               for (i = 0; i < memblock.reserved.cnt; i++)
+                       reserve_bootmem(memblock.reserved.region[i].base,
+                                       memblock_size_bytes(&memblock.reserved, i),
                                        BOOTMEM_DEFAULT);
        }
 
@@ -241,10 +241,10 @@ static void __init do_init_bootmem(void)
        int i;
 
        /* Add active regions with valid PFNs. */
-       for (i = 0; i < lmb.memory.cnt; i++) {
+       for (i = 0; i < memblock.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
-               start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-               end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+               start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+               end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
                __add_active_range(0, start_pfn, end_pfn);
        }
 
@@ -276,7 +276,7 @@ static void __init early_reserve_mem(void)
         * this catches the (definitely buggy) case of us accidentally
         * initializing the bootmem allocator with an invalid RAM area.
         */
-       lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+       memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
                    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
                    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
 
@@ -284,7 +284,7 @@ static void __init early_reserve_mem(void)
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
-               lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+               memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
 
        /*
         * Handle additional early reservations
@@ -299,27 +299,27 @@ void __init paging_init(void)
        unsigned long vaddr, end;
        int nid;
 
-       lmb_init();
+       memblock_init();
 
        sh_mv.mv_mem_init();
 
        early_reserve_mem();
 
-       lmb_enforce_memory_limit(memory_limit);
-       lmb_analyze();
+       memblock_enforce_memory_limit(memory_limit);
+       memblock_analyze();
 
-       lmb_dump_all();
+       memblock_dump_all();
 
        /*
         * Determine low and high memory ranges:
         */
-       max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
 
        nodes_clear(node_online_map);
 
        memory_start = (unsigned long)__va(__MEMORY_START);
-       memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+       memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());
 
        uncached_init();
        pmb_init();
index a2e645f64a371a04a625893cb0c5d484ad7c045e..3d85225b9e953aa68af579d7300f90119f5e694a 100644 (file)
@@ -9,7 +9,7 @@
  */
 #include <linux/module.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/numa.h>
 #include <linux/pfn.h>
@@ -39,12 +39,12 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
        pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
                         PAGE_KERNEL);
 
-       lmb_add(start, end - start);
+       memblock_add(start, end - start);
 
        __add_active_range(nid, start_pfn, end_pfn);
 
        /* Node-local pgdat */
-       NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
+       NODE_DATA(nid) = __va(memblock_alloc_base(sizeof(struct pglist_data),
                                             SMP_CACHE_BYTES, end));
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
@@ -54,7 +54,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 
        /* Node-local bootmap */
        bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-       bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
+       bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
                                       PAGE_SIZE, end);
        init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                          start_pfn, end_pfn);
index 6f1470baa314e8787213efedb78e0a3f9d6f9e56..c0015db247ba46ed787b4b16b47f402a3de1152a 100644 (file)
@@ -42,7 +42,7 @@ config SPARC64
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KRETPROBES
        select HAVE_KPROBES
-       select HAVE_LMB
+       select HAVE_MEMBLOCK
        select HAVE_SYSCALL_WRAPPERS
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/sparc/include/asm/lmb.h b/arch/sparc/include/asm/lmb.h
deleted file mode 100644 (file)
index 6a352cb..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _SPARC64_LMB_H
-#define _SPARC64_LMB_H
-
-#include <asm/oplib.h>
-
-#define LMB_DBG(fmt...) prom_printf(fmt)
-
-#define LMB_REAL_LIMIT 0
-
-#endif /* !(_SPARC64_LMB_H) */
diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h
new file mode 100644 (file)
index 0000000..f12af88
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _SPARC64_MEMBLOCK_H
+#define _SPARC64_MEMBLOCK_H
+
+#include <asm/oplib.h>
+
+#define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
+
+#define MEMBLOCK_REAL_LIMIT    0
+
+#endif /* !(_SPARC64_MEMBLOCK_H) */
index cdc91d919e93c36219e19b8a58d9ea32de721e3b..83e85c2e802ad6a075b08d0d0e5dfccb50e81108 100644 (file)
@@ -4,7 +4,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/log2.h>
 #include <linux/list.h>
 #include <linux/slab.h>
@@ -86,7 +86,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
        hp->handle_size = handle_size;
 }
 
-static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
+static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
 {
        unsigned int handle_size, alloc_size;
        struct mdesc_handle *hp;
@@ -97,7 +97,7 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
                       mdesc_size);
        alloc_size = PAGE_ALIGN(handle_size);
 
-       paddr = lmb_alloc(alloc_size, PAGE_SIZE);
+       paddr = memblock_alloc(alloc_size, PAGE_SIZE);
 
        hp = NULL;
        if (paddr) {
@@ -107,7 +107,7 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
        return hp;
 }
 
-static void mdesc_lmb_free(struct mdesc_handle *hp)
+static void mdesc_memblock_free(struct mdesc_handle *hp)
 {
        unsigned int alloc_size;
        unsigned long start;
@@ -120,9 +120,9 @@ static void mdesc_lmb_free(struct mdesc_handle *hp)
        free_bootmem_late(start, alloc_size);
 }
 
-static struct mdesc_mem_ops lmb_mdesc_ops = {
-       .alloc = mdesc_lmb_alloc,
-       .free  = mdesc_lmb_free,
+static struct mdesc_mem_ops memblock_mdesc_ops = {
+       .alloc = mdesc_memblock_alloc,
+       .free  = mdesc_memblock_free,
 };
 
 static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
@@ -914,7 +914,7 @@ void __init sun4v_mdesc_init(void)
 
        printk("MDESC: Size is %lu bytes.\n", len);
 
-       hp = mdesc_alloc(len, &lmb_mdesc_ops);
+       hp = mdesc_alloc(len, &memblock_mdesc_ops);
        if (hp == NULL) {
                prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
                prom_halt();
index fb06ac2bd38ffbb198080dc9006775fdab0b55f1..466a32763ea82fbe0bd644fb5468459a7fe4196f 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of_device.h>
 
 #include <asm/prom.h>
@@ -34,7 +34,7 @@
 
 void * __init prom_early_alloc(unsigned long size)
 {
-       unsigned long paddr = lmb_alloc(size, SMP_CACHE_BYTES);
+       unsigned long paddr = memblock_alloc(size, SMP_CACHE_BYTES);
        void *ret;
 
        if (!paddr) {
index b2831dc3c121c135a0ed41fe09aaf975f1aa678f..f0434513df159301da052662442b281bea30b5c5 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/cache.h>
 #include <linux/sort.h>
 #include <linux/percpu.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/mmzone.h>
 #include <linux/gfp.h>
 
@@ -726,7 +726,7 @@ static void __init find_ramdisk(unsigned long phys_base)
                initrd_start = ramdisk_image;
                initrd_end = ramdisk_image + sparc_ramdisk_size;
 
-               lmb_reserve(initrd_start, sparc_ramdisk_size);
+               memblock_reserve(initrd_start, sparc_ramdisk_size);
 
                initrd_start += PAGE_OFFSET;
                initrd_end += PAGE_OFFSET;
@@ -822,7 +822,7 @@ static void __init allocate_node_data(int nid)
        struct pglist_data *p;
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-       paddr = lmb_alloc_nid(sizeof(struct pglist_data),
+       paddr = memblock_alloc_nid(sizeof(struct pglist_data),
                              SMP_CACHE_BYTES, nid, nid_range);
        if (!paddr) {
                prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
@@ -843,7 +843,7 @@ static void __init allocate_node_data(int nid)
        if (p->node_spanned_pages) {
                num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
-               paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
+               paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
                                      nid_range);
                if (!paddr) {
                        prom_printf("Cannot allocate bootmap for nid[%d]\n",
@@ -974,11 +974,11 @@ static void __init add_node_ranges(void)
 {
        int i;
 
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               unsigned long size = lmb_size_bytes(&lmb.memory, i);
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               unsigned long size = memblock_size_bytes(&memblock.memory, i);
                unsigned long start, end;
 
-               start = lmb.memory.region[i].base;
+               start = memblock.memory.region[i].base;
                end = start + size;
                while (start < end) {
                        unsigned long this_end;
@@ -1010,7 +1010,7 @@ static int __init grab_mlgroups(struct mdesc_handle *md)
        if (!count)
                return -ENOENT;
 
-       paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
+       paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
                          SMP_CACHE_BYTES);
        if (!paddr)
                return -ENOMEM;
@@ -1051,7 +1051,7 @@ static int __init grab_mblocks(struct mdesc_handle *md)
        if (!count)
                return -ENOENT;
 
-       paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
+       paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
                          SMP_CACHE_BYTES);
        if (!paddr)
                return -ENOMEM;
@@ -1279,8 +1279,8 @@ static int bootmem_init_numa(void)
 
 static void __init bootmem_init_nonnuma(void)
 {
-       unsigned long top_of_ram = lmb_end_of_DRAM();
-       unsigned long total_ram = lmb_phys_mem_size();
+       unsigned long top_of_ram = memblock_end_of_DRAM();
+       unsigned long total_ram = memblock_phys_mem_size();
        unsigned int i;
 
        numadbg("bootmem_init_nonnuma()\n");
@@ -1292,15 +1292,15 @@ static void __init bootmem_init_nonnuma(void)
 
        init_node_masks_nonnuma();
 
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               unsigned long size = lmb_size_bytes(&lmb.memory, i);
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               unsigned long size = memblock_size_bytes(&memblock.memory, i);
                unsigned long start_pfn, end_pfn;
 
                if (!size)
                        continue;
 
-               start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-               end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+               start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+               end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }
 
@@ -1338,9 +1338,9 @@ static void __init trim_reserved_in_node(int nid)
 
        numadbg("  trim_reserved_in_node(%d)\n", nid);
 
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               unsigned long start = lmb.reserved.region[i].base;
-               unsigned long size = lmb_size_bytes(&lmb.reserved, i);
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               unsigned long start = memblock.reserved.region[i].base;
+               unsigned long size = memblock_size_bytes(&memblock.reserved, i);
                unsigned long end = start + size;
 
                reserve_range_in_node(nid, start, end);
@@ -1384,7 +1384,7 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
        unsigned long end_pfn;
        int nid;
 
-       end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn = end_pfn;
        min_low_pfn = (phys_base >> PAGE_SHIFT);
 
@@ -1734,7 +1734,7 @@ void __init paging_init(void)
                sun4v_ktsb_init();
        }
 
-       lmb_init();
+       memblock_init();
 
        /* Find available physical memory...
         *
@@ -1752,17 +1752,17 @@ void __init paging_init(void)
        phys_base = 0xffffffffffffffffUL;
        for (i = 0; i < pavail_ents; i++) {
                phys_base = min(phys_base, pavail[i].phys_addr);
-               lmb_add(pavail[i].phys_addr, pavail[i].reg_size);
+               memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
        }
 
-       lmb_reserve(kern_base, kern_size);
+       memblock_reserve(kern_base, kern_size);
 
        find_ramdisk(phys_base);
 
-       lmb_enforce_memory_limit(cmdline_memory_size);
+       memblock_enforce_memory_limit(cmdline_memory_size);
 
-       lmb_analyze();
-       lmb_dump_all();
+       memblock_analyze();
+       memblock_dump_all();
 
        set_bit(0, mmu_context_bmap);
 
@@ -1816,8 +1816,8 @@ void __init paging_init(void)
         */
        for_each_possible_cpu(i) {
                /* XXX Use node local allocations... XXX */
-               softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
-               hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+               softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+               hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
        }
 
        /* Setup bootmem... */
index 519b54327d750f5e3580ed45cc68e78272c1ba81..baa579c8e03839cbb8d54fb7b2b4d3b7d700742c 100644 (file)
@@ -142,6 +142,7 @@ struct x86_cpuinit_ops {
  * @set_wallclock:             set time back to HW clock
  * @is_untracked_pat_range     exclude from PAT logic
  * @nmi_init                   enable NMI on cpus
+ * @i8042_detect               pre-detect if i8042 controller exists
  */
 struct x86_platform_ops {
        unsigned long (*calibrate_tsc)(void);
@@ -150,6 +151,7 @@ struct x86_platform_ops {
        void (*iommu_shutdown)(void);
        bool (*is_untracked_pat_range)(u64 start, u64 end);
        void (*nmi_init)(void);
+       int (*i8042_detect)(void);
 };
 
 extern struct x86_init_ops x86_init;
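
The new hook follows the x86_platform_ops convention: the default implementation reports that an i8042 may be present, and a platform that knows it has none overrides the function pointer so the i8042 driver can bail out before touching the hardware. Both sides appear in later hunks of this commit; condensed:

        /* platform side (Moorestown): MID has no i8042 controller */
        static int mrst_i8042_detect(void) { return 0; }
        x86_platform.i8042_detect = mrst_i8042_detect;

        /* consumer side (i8042 driver): skip probing entirely */
        if (!x86_platform.i8042_detect())
                return -ENODEV;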
index c02cc692985c13b902f1a9c914535170f9670d01..a96489ee6cabf04a53664300f87141c4bbffc293 100644 (file)
@@ -921,7 +921,7 @@ void disable_local_APIC(void)
        unsigned int value;
 
        /* APIC hasn't been mapped yet */
-       if (!apic_phys)
+       if (!x2apic_mode && !apic_phys)
                return;
 
        clear_local_APIC();
index ebdb85cf2686fa36702cd4d50b657f22de85b3bd..e5cc7e82e60ddbf1bd1ca2871fdb7d7fc7628e34 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/apic.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/hpet.h>
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -191,6 +192,21 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
+/*
+ * Force the read back of the CMP register in hpet_next_event()
+ * to work around the problem that the CMP register write seems to be
+ * delayed. See hpet_next_event() for details.
+ *
+ * We do this on all SMBUS incarnations for now until we have more
+ * information about the affected chipsets.
+ */
+static void __init ati_hpet_bugs(int num, int slot, int func)
+{
+#ifdef CONFIG_HPET_TIMER
+       hpet_readback_cmp = 1;
+#endif
+}
+
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
 #define QFLAG_DONE             (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -220,6 +236,8 @@ static struct chipset early_qrk[] __initdata = {
          PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
        { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
          PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
+       { PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+         PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
        {}
 };
 
index 345a4b1fe1446812d65e25fd424886d05aeb1fe4..675879b65ce666c91b868c96972ea35f107810f4 100644 (file)
@@ -640,8 +640,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
        /* Skip cs, ip, orig_ax and gs. */      \
        "       subl $16, %esp\n"       \
        "       pushl %fs\n"            \
-       "       pushl %ds\n"            \
        "       pushl %es\n"            \
+       "       pushl %ds\n"            \
        "       pushl %eax\n"           \
        "       pushl %ebp\n"           \
        "       pushl %edi\n"           \
index e796448f0eb5e7a156600d690a32807e3b7b39fa..5915e0b333033471fa27c8056a6dae2795bbe6ee 100644 (file)
@@ -216,6 +216,12 @@ static void __init mrst_setup_boot_clock(void)
                setup_boot_APIC_clock();
 };
 
+/* MID systems don't have an i8042 controller */
+static int mrst_i8042_detect(void)
+{
+       return 0;
+}
+
 /*
  * Moorestown specific x86_init function overrides and early setup
  * calls.
@@ -233,6 +239,7 @@ void __init x86_mrst_early_setup(void)
        x86_cpuinit.setup_percpu_clockev = mrst_setup_secondary_clock;
 
        x86_platform.calibrate_tsc = mrst_calibrate_tsc;
+       x86_platform.i8042_detect = mrst_i8042_detect;
        x86_init.pci.init = pci_mrst_init;
        x86_init.pci.fixup_irqs = x86_init_noop;
 
index e72d3fc6547d6fe767785e869deb90ea683d8cdc..939b9e98245f733262cdee8198daa08498abf87e 100644 (file)
@@ -498,15 +498,10 @@ void force_hpet_resume(void)
  * See erratum #27 (Misinterpreted MSI Requests May Result in
  * Corrupted LPC DMA Data) in AMD Publication #46837,
  * "SB700 Family Product Errata", Rev. 1.0, March 2010.
- *
- * Also force the read back of the CMP register in hpet_next_event()
- * to work around the problem that the CMP register write seems to be
- * delayed. See hpet_next_event() for details.
  */
 static void force_disable_hpet_msi(struct pci_dev *unused)
 {
        hpet_msi_disable = 1;
-       hpet_readback_cmp = 1;
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
index de3b63ae3da26300297e315a6f7a9b5157d429ee..690c2c09faf3258ab1a7d8b70a446b460c383fe7 100644 (file)
@@ -238,6 +238,15 @@ void __init setup_per_cpu_areas(void)
 #ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
+               /*
+                * Ensure that the boot cpu numa_node is correct when the boot
+                * cpu is on a node that doesn't have memory installed.
+                * Also cpu_up() will call cpu_to_node() for APs when
+                * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
+                * up later with c_init aka intel_init/amd_init.
+                * So set them all (boot cpu and all APs).
+                */
+               set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
 #endif
 #endif
                /*
@@ -257,14 +266,6 @@ void __init setup_per_cpu_areas(void)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
-       /*
-        * make sure boot cpu numa_node is right, when boot cpu is on the
-        * node that doesn't have mem installed
-        */
-       set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id));
-#endif
-
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();
 
index 61a1e8c7e19f8169e736bd329e9b2b72c87c3cce..cd6da6bf3ecaf7c24b9b9848c31d313c741d0f16 100644 (file)
@@ -5,6 +5,7 @@
  */
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/module.h>
 
 #include <asm/bios_ebda.h>
 #include <asm/paravirt.h>
@@ -85,6 +86,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 };
 
 static void default_nmi_init(void) { };
+static int default_i8042_detect(void) { return 1; };
 
 struct x86_platform_ops x86_platform = {
        .calibrate_tsc                  = native_calibrate_tsc,
@@ -92,5 +94,8 @@ struct x86_platform_ops x86_platform = {
        .set_wallclock                  = mach_set_rtc_mmss,
        .iommu_shutdown                 = iommu_shutdown_noop,
        .is_untracked_pat_range         = is_ISA_range,
-       .nmi_init                       = default_nmi_init
+       .nmi_init                       = default_nmi_init,
+       .i8042_detect                   = default_i8042_detect
 };
+
+EXPORT_SYMBOL_GPL(x86_platform);
index a6f695d76928675008a99f2030b00299856840c4..b1ed0a1a591338c801d49268c2f784477fd4a0a1 100644 (file)
@@ -1879,6 +1879,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        pgprintk("hfn old %lx new %lx\n",
                                 spte_to_pfn(*sptep), pfn);
                        rmap_remove(vcpu->kvm, sptep);
+                       __set_spte(sptep, shadow_trap_nonpresent_pte);
+                       kvm_flush_remote_tlbs(vcpu->kvm);
                } else
                        was_rmapped = 1;
        }
@@ -2924,7 +2926,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
        return kvm_mmu_zap_page(kvm, page) + 1;
 }
 
-static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        struct kvm *kvm;
        struct kvm *kvm_freed = NULL;
index 859a01a07dbfb2937b8f0792e4a4aa0bbe50767b..ee03679efe788d61e73d76f818931d8c1a45fad2 100644 (file)
@@ -1744,18 +1744,15 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
        }
-       vcpu->arch.efer |= EFER_LMA;
-       vmx_set_efer(vcpu, vcpu->arch.efer);
+       vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
 }
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.efer &= ~EFER_LMA;
-
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     & ~VM_ENTRY_IA32E_MODE);
-       vmx_set_efer(vcpu, vcpu->arch.efer);
+       vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
 }
 
 #endif
index 6fdb3ec30c3197e15fc54e18c91291f5eb403450..55253095be84c66d37c5dcaaba63bd4ebaee6df2 100644 (file)
@@ -184,6 +184,7 @@ static void __init pcibios_allocate_resources(int pass)
                                        idx, r, disabled, pass);
                                if (pci_claim_resource(dev, idx) < 0) {
                                        /* We'll assign a new address later */
+                                       dev->fw_addr[idx] = r->start;
                                        r->end -= r->start;
                                        r->start = 0;
                                }
index 7ef3a2735df39f2fdfbd4624a440e22d3e169879..cb29191cee5877824391a33de0cea9c5255f2c55 100644 (file)
@@ -66,8 +66,9 @@ static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
                                          devfn, pos, 4, &pcie_cap))
                        return 0;
 
-               if (pcie_cap == 0xffffffff)
-                       return 0;
+               if (PCI_EXT_CAP_ID(pcie_cap) == 0x0000 ||
+                       PCI_EXT_CAP_ID(pcie_cap) == 0xffff)
+                       break;
 
                if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
                        raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
@@ -76,7 +77,7 @@ static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
                                return pos;
                }
 
-               pos = pcie_cap >> 20;
+               pos = PCI_EXT_CAP_NEXT(pcie_cap);
        }
 
        return 0;
index 98a66103f4f23b1aafaec67e66116e74e1bf0c8c..a854df2a5a4b860b223d6ad49f45255b0678e970 100644 (file)
@@ -165,7 +165,7 @@ static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
 
        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
-               ablkcipher_walk_done(req, walk, -ENOMEM);
+               return ablkcipher_walk_done(req, walk, -ENOMEM);
 
        base = p + 1;
 
index d7be69f131546b02a1784ac750e4f3dc042e10d1..b7dab32ce63cd883974ac0e9b3646d301460cc7e 100644 (file)
@@ -194,6 +194,6 @@ err_timer:
 
 module_init(cs5535_mfgpt_init);
 
-MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("CS5535/CS5536 MFGPT clock event driver");
 MODULE_LICENSE("GPL");
index aedef7941b22855f17c892f73d370ae10f17632c..0d2f9dbb47e4fa6d3d3a4440b8427fe544407af3 100644 (file)
@@ -209,7 +209,7 @@ config EDAC_I5100
 
 config EDAC_MPC85XX
        tristate "Freescale MPC83xx / MPC85xx"
-       depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || MPC85xx)
+       depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx)
        help
          Support for error detection and correction on the Freescale
          MPC8349, MPC8560, MPC8540, MPC8548
index 52ca09bf4726ae9cb5984d38c79d86705c5b51f8..f39b00a46eda36c1d6e6d265e7aa02245d012556 100644 (file)
@@ -1120,6 +1120,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
        { .compatible = "fsl,mpc8555-memory-controller", },
        { .compatible = "fsl,mpc8560-memory-controller", },
        { .compatible = "fsl,mpc8568-memory-controller", },
+       { .compatible = "fsl,mpc8569-memory-controller", },
        { .compatible = "fsl,mpc8572-memory-controller", },
        { .compatible = "fsl,mpc8349-memory-controller", },
        { .compatible = "fsl,p2020-memory-controller", },
index f73a1555e49d7d92fad5da024c4face448a8e114..e23c06893d19f62195247220075aafce1cfa75c0 100644 (file)
@@ -352,6 +352,6 @@ static void __exit cs5535_gpio_exit(void)
 module_init(cs5535_gpio_init);
 module_exit(cs5535_gpio_exit);
 
-MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
 MODULE_LICENSE("GPL");
index 43ce3809ef6419bdefa31e8731dae359e30836ca..51bd301cf10d9002555177d95b28e86703145d67 100644 (file)
@@ -2241,6 +2241,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
                page = read_cache_page_gfp(mapping, i,
                                           GFP_HIGHUSER |
                                           __GFP_COLD |
+                                          __GFP_RECLAIMABLE |
                                           gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
@@ -4987,7 +4988,7 @@ i915_gpu_is_active(struct drm_device *dev)
 }
 
 static int
-i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        drm_i915_private_t *dev_priv, *next_dev;
        struct drm_i915_gem_object *obj_priv, *next_obj;
index 0f9a4785d7980feca1cc65d5b7504906535ea5f2..3525f533e186d3f3cf0e7990d9efa4af90a66d85 100644 (file)
@@ -69,11 +69,11 @@ config KEYBOARD_ATARI
          module will be called atakbd.
 
 config KEYBOARD_ATKBD
-       tristate "AT keyboard" if EMBEDDED || !X86 || X86_MRST
+       tristate "AT keyboard" if EMBEDDED || !X86
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86 && !X86_MRST
+       select SERIO_I8042 if X86
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
index eeb58c1cac164a65b6d2c0a15f7e40981aec16cd..c714ca2407f8a190d247f3f8eb21d96edb577ac3 100644 (file)
@@ -17,7 +17,7 @@ config MOUSE_PS2
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86 && !X86_MRST
+       select SERIO_I8042 if X86
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you have a PS/2 mouse connected to your system. This
index 40cea334ad13f8159d0f71057680496e42150791..9ba9c4a17e1541a48a5f8a5a90606d6d3aecb02e 100644 (file)
@@ -206,6 +206,7 @@ static int synaptics_resolution(struct psmouse *psmouse)
        unsigned char max[3];
 
        if (SYN_ID_MAJOR(priv->identity) < 4)
+               return 0;
 
        if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res) == 0) {
                if (res[0] != 0 && (res[1] & 0x80) && res[2] != 0) {
index 256b9e9394dc805b2062f1e70f3f7abd24814647..3bfe8fafc6adac421577bc17c4da964537a4145f 100644 (file)
@@ -22,7 +22,7 @@ config SERIO_I8042
        tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
        default y
        depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
-                  (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !X86_MRST
+                  (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
        help
          i8042 is the chip over which the standard AT keyboard and PS/2
          mouse are connected to the computer. If you use these devices,
index 6168469ad1a69167c5fe77c5ef4ce8afedbf4be7..81003c4739f4f32351566759b9225fdfb974a7b7 100644 (file)
@@ -7,6 +7,10 @@
  * the Free Software Foundation.
  */
 
+#ifdef CONFIG_X86
+#include <asm/x86_init.h>
+#endif
+
 /*
  * Names.
  */
@@ -840,6 +844,12 @@ static int __init i8042_platform_init(void)
 {
        int retval;
 
+#ifdef CONFIG_X86
+       /* Just return if pre-detection shows no i8042 controller exists */
+       if (!x86_platform.i8042_detect())
+               return -ENODEV;
+#endif
+
 /*
  * On ix86 platforms touching the i8042 data register region can do really
  * bad things. Because of this the region is always reserved on ix86 boxes.
index cc18265be1a8f2266480f93841c0530d3e180cea..7a45d68c35166d62d2e2404d3f65644b32cea42c 100644 (file)
@@ -233,7 +233,7 @@ static int __devinit w90x900ts_probe(struct platform_device *pdev)
        w90p910_ts->state = TS_IDLE;
        spin_lock_init(&w90p910_ts->lock);
        setup_timer(&w90p910_ts->timer, w90p910_check_pen_up,
-                   (unsigned long)&w90p910_ts);
+                   (unsigned long)w90p910_ts);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
index 9bec24db4d41385efe8a1c891f0a6e9daf072488..2d44b3300104eb183f902212bf281849dce00186 100644 (file)
@@ -366,6 +366,6 @@ static int __init cs5535_mfgpt_init(void)
 
 module_init(cs5535_mfgpt_init);
 
-MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
 MODULE_LICENSE("GPL");
index af217924a76eb94511fddf7789d5d1a458a2c21b..ad30f074ee151eead0ebe435037fde70055f1d20 100644 (file)
@@ -365,6 +365,26 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
 
 static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
 {
+       struct sdhci_host *host =  platform_get_drvdata(pdev);
+       struct sdhci_s3c *sc = sdhci_priv(host);
+       int ptr;
+
+       sdhci_remove_host(host, 1);
+
+       for (ptr = 0; ptr < 3; ptr++) {
+               clk_disable(sc->clk_bus[ptr]);
+               clk_put(sc->clk_bus[ptr]);
+       }
+       clk_disable(sc->clk_io);
+       clk_put(sc->clk_io);
+
+       iounmap(host->ioaddr);
+       release_resource(sc->ioarea);
+       kfree(sc->ioarea);
+
+       sdhci_free_host(host);
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
index 7acb3edc47ef88ba62a2de614213a85518b98274..2602852cc55a6037c5160575f3620632397fcd2c 100644 (file)
@@ -677,7 +677,7 @@ static int ibmveth_close(struct net_device *netdev)
        if (!adapter->pool_config)
                netif_stop_queue(netdev);
 
-       free_irq(netdev->irq, netdev);
+       h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 
        do {
                lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
@@ -689,6 +689,8 @@ static int ibmveth_close(struct net_device *netdev)
                                     lpar_rc);
        }
 
+       free_irq(netdev->irq, netdev);
+
        adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
 
        ibmveth_cleanup(adapter);
index 5b3dfb4ab27985cd8823c08035dd066da36e4d1c..33525bf2a3d3e87f6769ac24c65861441c375dd3 100644 (file)
@@ -1168,6 +1168,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
        int interrupts, nr_serviced = 0, i;
        struct ei_device *ei_local;
        int handled = 0;
+       unsigned long flags;
 
        e8390_base = dev->base_addr;
        ei_local = netdev_priv(dev);
@@ -1176,7 +1177,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
         *      Protect the irq test too.
         */
         
-       spin_lock(&ei_local->page_lock);
+       spin_lock_irqsave(&ei_local->page_lock, flags);
 
        if (ei_local->irqlock) 
        {
@@ -1188,7 +1189,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
                           dev->name, inb_p(e8390_base + EN0_ISR),
                           inb_p(e8390_base + EN0_IMR));
 #endif
-               spin_unlock(&ei_local->page_lock);
+               spin_unlock_irqrestore(&ei_local->page_lock, flags);
                return IRQ_NONE;
        }
     
@@ -1261,7 +1262,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
        ei_local->irqlock = 0;
        outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 
-       spin_unlock(&ei_local->page_lock);
+       spin_unlock_irqrestore(&ei_local->page_lock, flags);
        return IRQ_RETVAL(handled);
 }
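
The switch from spin_lock() to spin_lock_irqsave() makes the handler safe regardless of the interrupt state it is entered with: the irqsave variant records the caller's interrupt state in flags, disables local interrupts for the critical section, and restores the saved state on unlock. The canonical pattern, as used above:

        unsigned long flags;

        spin_lock_irqsave(&ei_local->page_lock, flags);
        /* ... touch state shared with interrupt context ... */
        spin_unlock_irqrestore(&ei_local->page_lock, flags);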
 
index 96b6cfbf0a3a682b14216fa23a9e737a48973aaa..cdc6a5c2e70d81955efec9d3c633a593ef943b16 100644 (file)
@@ -1316,7 +1316,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
                { 0x7c800000, 0x28000000,       RTL_GIGA_MAC_VER_26 },
 
                /* 8168C family. */
-               { 0x7cf00000, 0x3ca00000,       RTL_GIGA_MAC_VER_24 },
+               { 0x7cf00000, 0x3cb00000,       RTL_GIGA_MAC_VER_24 },
                { 0x7cf00000, 0x3c900000,       RTL_GIGA_MAC_VER_23 },
                { 0x7cf00000, 0x3c800000,       RTL_GIGA_MAC_VER_18 },
                { 0x7c800000, 0x3c800000,       RTL_GIGA_MAC_VER_24 },
index 77b359162d6cad0a408754fff4cc8b9af64d3063..23c15aa9fbd5167c51e80601eb5213c0dd144283 100644 (file)
@@ -730,13 +730,17 @@ static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
 
        /* RX */
        if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
-               goto err;
+               goto err_rx;
 
        /* Register Read */
        if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
-               goto err;
+               goto err_reg;
 
        return 0;
+err_reg:
+       ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+err_rx:
+       ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
 err:
        return -ENOMEM;
 }
index d24dc7dc072328fcecab649193ed4cbad0c17caa..972a9c3af39e5e7ad11dafa354b5f7314083a985 100644 (file)
@@ -330,6 +330,7 @@ static int prism2_pci_probe(struct pci_dev *pdev,
 
         dev->irq = pdev->irq;
         hw_priv->mem_start = mem;
+       dev->base_addr = (unsigned long) mem;
 
        prism2_pci_cor_sreset(local);
 
index c2a453a1a9917e4e6bb9124b4af0318416ff0f34..dc43ebd1f1fd2eb3012c900456ec30b35b9fbbb5 100644 (file)
@@ -97,6 +97,17 @@ static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
        spin_lock_irqsave(&priv->sta_lock, flags);
        memset(priv->stations, 0, sizeof(priv->stations));
        priv->num_stations = 0;
+
+       /*
+        * Remove all key information that is not stored as part of station
+        * information, since mac80211 may not have had a chance to remove
+        * all the keys. When the device is reconfigured by mac80211 after
+        * an error, all keys will be reconfigured.
+        */
+       priv->ucode_key_table = 0;
+       priv->key_mapping_key = 0;
+       memset(priv->wep_keys, 0, sizeof(priv->wep_keys));
+
        spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 
index 3ae468c4d7604b3b8e82eac570445bdbf098ab7a..f20d3eeeea7fe0ee4001bf3597255bab1ad9856e 100644 (file)
@@ -853,6 +853,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
                    BIT(NL80211_IFTYPE_MESH_POINT) |
                    BIT(NL80211_IFTYPE_WDS);
 
+       /*
+        * Initialize configuration work.
+        */
+       INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
+
        /*
         * Let the driver probe the device to detect the capabilities.
         */
@@ -862,11 +867,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
                goto exit;
        }
 
-       /*
-        * Initialize configuration work.
-        */
-       INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
-
        /*
         * Allocate queue array.
         */
index 92379e2d37e77ed4797dba23b7402f795dec80e2..2aaa13150de3ba40fd0a65cbb569f1fe96b9a49f 100644 (file)
@@ -156,6 +156,38 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
                                             pcibios_align_resource, dev);
        }
 
+       if (ret < 0 && dev->fw_addr[resno]) {
+               struct resource *root, *conflict;
+               resource_size_t start, end;
+
+               /*
+                * If we failed to assign anything, let's try the address
+                * where firmware left it.  That at least has a chance of
+                * working, which is better than just leaving it disabled.
+                */
+
+               if (res->flags & IORESOURCE_IO)
+                       root = &ioport_resource;
+               else
+                       root = &iomem_resource;
+
+               start = res->start;
+               end = res->end;
+               res->start = dev->fw_addr[resno];
+               res->end = res->start + size - 1;
+               dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
+                        resno, res);
+               conflict = request_resource_conflict(root, res);
+               if (conflict) {
+                       dev_info(&dev->dev,
+                                "BAR %d: %pR conflicts with %s %pR\n", resno,
+                                res, conflict->name, conflict);
+                       res->start = start;
+                       res->end = end;
+               } else
+                       ret = 0;
+       }
+
        if (!ret) {
                res->flags &= ~IORESOURCE_STARTALIGN;
                dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
index 40658e3385b45346ddc91132816e28ccfee4dd84..bb2f1fba637b36ec041909a26dd176be6f472c0a 100644 (file)
@@ -489,7 +489,7 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
                mutex_unlock(&ipclock);
                return -ENODEV;
        }
-       ipc_command(cmd << 12 | sub);
+       ipc_command(sub << 12 | cmd);
        err = busy_loop();
        mutex_unlock(&ipclock);
        return err;
@@ -501,9 +501,9 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
  *     @cmd: command
  *     @sub: sub type
  *     @in: input data
- *     @inlen: input length
+ *     @inlen: input length in dwords
  *     @out: output data
- *     @outlein: output length
+ *     @outlen: output length in dwords
  *
  *     Issue a command to the SCU which involves data transfers. Do the
  *     data copies under the lock but leave it for the caller to interpret
@@ -524,7 +524,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
        for (i = 0; i < inlen; i++)
                ipc_data_writel(*in++, 4 * i);
 
-       ipc_command((cmd << 12) | sub | (inlen << 18));
+       ipc_command((sub << 12) | cmd | (inlen << 18));
        err = busy_loop();
 
        for (i = 0; i < outlen; i++)
@@ -556,6 +556,10 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
        u32 cmd = 0;
 
        mutex_lock(&ipclock);
+       if (ipcdev.pdev == NULL) {
+               mutex_unlock(&ipclock);
+               return -ENODEV;
+       }
        cmd = (addr >> 24) & 0xFF;
        if (cmd == IPC_I2C_READ) {
                writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
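
The two ipc_command() fixes in this file swap the field order of the SCU command word: the command proper occupies the low bits, the sub-command starts at bit 12, and the input length in dwords starts at bit 18, i.e. (sub << 12) | cmd | (inlen << 18). A hedged helper making the corrected layout explicit (the helper is illustrative, not part of the driver):

        static u32 scu_ipc_cmd_word(int cmd, int sub, int inlen)
        {
                /* low bits: cmd; bit 12+: sub; bit 18+: dword count */
                return (sub << 12) | cmd | (inlen << 18);
        }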
index d762a0cbc6af40b2c4aa58bde75e18e514fcd658..2afbeec8b7913f504a04beed44fc9ddee703c51b 100644 (file)
@@ -163,7 +163,7 @@ static int ds2782_get_capacity(struct ds278x_info *info, int *capacity)
        if (err)
                return err;
        *capacity = raw;
-       return raw;
+       return 0;
 }
 
 static int ds2786_get_current(struct ds278x_info *info, int *current_uA)
index 34d51dd4c53902d79ef3c21434db61923ee79fc8..bed7b4634ccd4648f42c188d8e5e2937534b1f8e 100644 (file)
@@ -948,8 +948,10 @@ static ssize_t dasd_alias_show(struct device *dev,
        if (device->discipline && device->discipline->get_uid &&
            !device->discipline->get_uid(device, &uid)) {
                if (uid.type == UA_BASE_PAV_ALIAS ||
-                   uid.type == UA_HYPER_PAV_ALIAS)
+                   uid.type == UA_HYPER_PAV_ALIAS) {
+                       dasd_put_device(device);
                        return sprintf(buf, "1\n");
+               }
        }
        dasd_put_device(device);
 
index ce7cb87479fe3b8aed786eeea182549da7352b35..407d0e9adfaf96a0d4362f8e56c986f0c17700fa 100644 (file)
@@ -713,7 +713,7 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
        ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
        if (ret)
                goto out_free;
-       memcpy(desc, &chsc_resp->data, chsc_resp->length);
+       memcpy(desc, &chsc_resp->data, sizeof(*desc));
 out_free:
        kfree(chsc_resp);
        return ret;
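
The memcpy fix swaps a device-reported length for the size of the destination, the usual defence when the response length is not trusted. A hedged, standalone illustration of the pattern (struct and names invented for the example):

#include <stdio.h>
#include <string.h>

struct desc { char data[16]; };

/* Copy at most sizeof(*d) no matter what length the response claims;
 * resp_len stands in for a length field under device control. */
static void fill_desc(struct desc *d, const char *resp, size_t resp_len)
{
        (void)resp_len;                 /* untrusted; ignored on purpose */
        memcpy(d, resp, sizeof(*d));    /* bounded by the destination */
}

int main(void)
{
        char resp[64] = "response payload";
        struct desc d;

        fill_desc(&d, resp, 64 /* device could claim anything */);
        printf("%.16s\n", d.data);
        return 0;
}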
index ffa111a7e9d443f23cda4287eeeb17d53d323a46..97ab0a81338adf036bea9e706dbcbe552b6d607c 100644 (file)
@@ -66,28 +66,6 @@ struct mpc8xxx_spi_reg {
        __be32 receive;
 };
 
-/* SPI Parameter RAM */
-struct spi_pram {
-       __be16  rbase;  /* Rx Buffer descriptor base address */
-       __be16  tbase;  /* Tx Buffer descriptor base address */
-       u8      rfcr;   /* Rx function code */
-       u8      tfcr;   /* Tx function code */
-       __be16  mrblr;  /* Max receive buffer length */
-       __be32  rstate; /* Internal */
-       __be32  rdp;    /* Internal */
-       __be16  rbptr;  /* Internal */
-       __be16  rbc;    /* Internal */
-       __be32  rxtmp;  /* Internal */
-       __be32  tstate; /* Internal */
-       __be32  tdp;    /* Internal */
-       __be16  tbptr;  /* Internal */
-       __be16  tbc;    /* Internal */
-       __be32  txtmp;  /* Internal */
-       __be32  res;    /* Tx temp. */
-       __be16  rpbase; /* Relocation pointer (CPM1 only) */
-       __be16  res1;   /* Reserved */
-};
-
 /* SPI Controller mode register definitions */
 #define        SPMODE_LOOP             (1 << 30)
 #define        SPMODE_CI_INACTIVEHIGH  (1 << 29)
index 57a593c58cf418769537c06d4602910203f8e5e0..d219070fed3da07b3944ac53363673f310435f33 100644 (file)
@@ -177,8 +177,8 @@ static void handle_tx(struct vhost_net *net)
                        break;
                }
                if (err != len)
-                       pr_err("Truncated TX packet: "
-                              " len %d != %zd\n", err, len);
+                       pr_debug("Truncated TX packet: "
+                                " len %d != %zd\n", err, len);
                vhost_add_used_and_signal(&net->dev, vq, head, 0);
                total_len += len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
@@ -275,8 +275,8 @@ static void handle_rx(struct vhost_net *net)
                }
                /* TODO: Should check and handle checksum. */
                if (err > len) {
-                       pr_err("Discarded truncated rx packet: "
-                              " len %d > %zd\n", err, len);
+                       pr_debug("Discarded truncated rx packet: "
+                                " len %d > %zd\n", err, len);
                        vhost_discard_vq_desc(vq);
                        continue;
                }
@@ -534,11 +534,16 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        rcu_assign_pointer(vq->private_data, sock);
        vhost_net_enable_vq(n, vq);
 done:
+       mutex_unlock(&vq->mutex);
+
        if (oldsock) {
                vhost_net_flush_vq(n, index);
                fput(oldsock->file);
        }
 
+       mutex_unlock(&n->dev.mutex);
+       return 0;
+
 err_vq:
        mutex_unlock(&vq->mutex);
 err:
index 0d1d966b0fe45d20e6ff75f44a6dea64f66eb54e..c3df14ce2cc2c00d232028d1238121ff9c37e35c 100644 (file)
@@ -2304,12 +2304,17 @@ noinline int btrfs_leaf_free_space(struct btrfs_root *root,
        return ret;
 }
 
+/*
+ * min slot controls the lowest index we're willing to push to the
+ * right.  We'll push up to and including min_slot, but no lower
+ */
 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      int data_size, int empty,
                                      struct extent_buffer *right,
-                                     int free_space, u32 left_nritems)
+                                     int free_space, u32 left_nritems,
+                                     u32 min_slot)
 {
        struct extent_buffer *left = path->nodes[0];
        struct extent_buffer *upper = path->nodes[1];
@@ -2327,7 +2332,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
        if (empty)
                nr = 0;
        else
-               nr = 1;
+               nr = max_t(u32, 1, min_slot);
 
        if (path->slots[0] >= left_nritems)
                push_space += data_size;
@@ -2469,10 +2474,14 @@ out_unlock:
  *
  * returns 1 if the push failed because the other node didn't have enough
  * room, 0 if everything worked out and < 0 if there were major errors.
+ *
+ * this will push starting from min_slot to the end of the leaf.  It won't
+ * push any slot lower than min_slot
  */
 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
-                          *root, struct btrfs_path *path, int data_size,
-                          int empty)
+                          *root, struct btrfs_path *path,
+                          int min_data_size, int data_size,
+                          int empty, u32 min_slot)
 {
        struct extent_buffer *left = path->nodes[0];
        struct extent_buffer *right;
@@ -2514,8 +2523,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
        if (left_nritems == 0)
                goto out_unlock;
 
-       return __push_leaf_right(trans, root, path, data_size, empty,
-                               right, free_space, left_nritems);
+       return __push_leaf_right(trans, root, path, min_data_size, empty,
+                               right, free_space, left_nritems, min_slot);
 out_unlock:
        btrfs_tree_unlock(right);
        free_extent_buffer(right);
@@ -2525,12 +2534,17 @@ out_unlock:
 /*
  * push some data in the path leaf to the left, trying to free up at
  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
+ *
+ * max_slot can put a limit on how far into the leaf we'll push items.  The
+ * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
+ * items
  */
 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path, int data_size,
                                     int empty, struct extent_buffer *left,
-                                    int free_space, int right_nritems)
+                                    int free_space, u32 right_nritems,
+                                    u32 max_slot)
 {
        struct btrfs_disk_key disk_key;
        struct extent_buffer *right = path->nodes[0];
@@ -2549,9 +2563,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
        slot = path->slots[1];
 
        if (empty)
-               nr = right_nritems;
+               nr = min(right_nritems, max_slot);
        else
-               nr = right_nritems - 1;
+               nr = min(right_nritems - 1, max_slot);
 
        for (i = 0; i < nr; i++) {
                item = btrfs_item_nr(right, i);
@@ -2712,10 +2726,14 @@ out:
 /*
  * push some data in the path leaf to the left, trying to free up at
  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
+ *
+ * max_slot can put a limit on how far into the leaf we'll push items.  The
+ * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
+ * items
  */
 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
-                         *root, struct btrfs_path *path, int data_size,
-                         int empty)
+                         *root, struct btrfs_path *path, int min_data_size,
+                         int data_size, int empty, u32 max_slot)
 {
        struct extent_buffer *right = path->nodes[0];
        struct extent_buffer *left;
@@ -2761,8 +2779,9 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
                goto out;
        }
 
-       return __push_leaf_left(trans, root, path, data_size,
-                              empty, left, free_space, right_nritems);
+       return __push_leaf_left(trans, root, path, min_data_size,
+                              empty, left, free_space, right_nritems,
+                              max_slot);
 out:
        btrfs_tree_unlock(left);
        free_extent_buffer(left);
@@ -2854,6 +2873,64 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
        return ret;
 }
 
+/*
+ * double splits happen when we need to insert a big item in the middle
+ * of a leaf.  A double split can leave us with 3 mostly empty leaves:
+ * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
+ *          A                 B                 C
+ *
+ * We avoid this by trying to push the items on either side of our target
+ * into the adjacent leaves.  If all goes well we can avoid the double split
+ * completely.
+ */
+static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
+                                         struct btrfs_root *root,
+                                         struct btrfs_path *path,
+                                         int data_size)
+{
+       int ret;
+       int progress = 0;
+       int slot;
+       u32 nritems;
+
+       slot = path->slots[0];
+
+       /*
+        * try to push all the items after our slot into the
+        * right leaf
+        */
+       ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
+       if (ret < 0)
+               return ret;
+
+       if (ret == 0)
+               progress++;
+
+       nritems = btrfs_header_nritems(path->nodes[0]);
+       /*
+        * our goal is to get our slot at the start or end of a leaf.  If
+        * we've done so we're done
+        */
+       if (path->slots[0] == 0 || path->slots[0] == nritems)
+               return 0;
+
+       if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+               return 0;
+
+       /* try to push all the items before our slot into the left leaf */
+       slot = path->slots[0];
+       ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
+       if (ret < 0)
+               return ret;
+
+       if (ret == 0)
+               progress++;
+
+       if (progress)
+               return 0;
+       return 1;
+}
+
 /*
  * split the path's leaf in two, making sure there is at least data_size
  * available for the resulting leaf level of the path.
@@ -2876,6 +2953,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
        int wret;
        int split;
        int num_doubles = 0;
+       int tried_avoid_double = 0;
 
        l = path->nodes[0];
        slot = path->slots[0];
@@ -2884,12 +2962,14 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
                return -EOVERFLOW;
 
        /* first try to make some room by pushing left and right */
-       if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
-               wret = push_leaf_right(trans, root, path, data_size, 0);
+       if (data_size) {
+               wret = push_leaf_right(trans, root, path, data_size,
+                                      data_size, 0, 0);
                if (wret < 0)
                        return wret;
                if (wret) {
-                       wret = push_leaf_left(trans, root, path, data_size, 0);
+                       wret = push_leaf_left(trans, root, path, data_size,
+                                             data_size, 0, (u32)-1);
                        if (wret < 0)
                                return wret;
                }
@@ -2923,6 +3003,8 @@ again:
                                if (mid != nritems &&
                                    leaf_space_used(l, mid, nritems - mid) +
                                    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+                                       if (data_size && !tried_avoid_double)
+                                               goto push_for_double;
                                        split = 2;
                                }
                        }
@@ -2939,6 +3021,8 @@ again:
                                if (mid != nritems &&
                                    leaf_space_used(l, mid, nritems - mid) +
                                    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+                                       if (data_size && !tried_avoid_double)
+                                               goto push_for_double;
                                        split = 2 ;
                                }
                        }
@@ -3019,6 +3103,13 @@ again:
        }
 
        return ret;
+
+push_for_double:
+       push_for_double_split(trans, root, path, data_size);
+       tried_avoid_double = 1;
+       if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+               return 0;
+       goto again;
 }
 
 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
@@ -3915,13 +4006,15 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                        extent_buffer_get(leaf);
 
                        btrfs_set_path_blocking(path);
-                       wret = push_leaf_left(trans, root, path, 1, 1);
+                       wret = push_leaf_left(trans, root, path, 1, 1,
+                                             1, (u32)-1);
                        if (wret < 0 && wret != -ENOSPC)
                                ret = wret;
 
                        if (path->nodes[0] == leaf &&
                            btrfs_header_nritems(leaf)) {
-                               wret = push_leaf_right(trans, root, path, 1, 1);
+                               wret = push_leaf_right(trans, root, path, 1,
+                                                      1, 1, 0);
                                if (wret < 0 && wret != -ENOSPC)
                                        ret = wret;
                        }
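
push_for_double_split() above aims to land the target slot on a leaf edge so one ordinary split suffices. A toy calculation of that goal (it ignores the failure paths where a neighbour lacks space):

#include <stdio.h>

int main(void)
{
        unsigned nritems = 9, slot = 4;
        unsigned pushed_right = nritems - slot;   /* slots 4..8 go right */
        unsigned pushed_left = slot;              /* slots 0..3 go left */

        nritems -= pushed_right + pushed_left;
        printf("pushed right %u, left %u, remaining %u\n",
               pushed_right, pushed_left, nritems);
        /* the target slot now sits at the edge of a nearly empty leaf,
         * so a single ordinary split can make room for the big item */
        return 0;
}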
index 4dbaf89b1337632ac7800b84b183d6cd0ab14242..9254b3d58dbef22974af3c7c61dbc9a4af7a33b6 100644 (file)
@@ -1458,7 +1458,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
         */
 
        /* the destination must be opened for writing */
-       if (!(file->f_mode & FMODE_WRITE))
+       if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
                return -EINVAL;
 
        ret = mnt_want_write(file->f_path.mnt);
@@ -1511,7 +1511,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
        /* determine range to clone */
        ret = -EINVAL;
-       if (off >= src->i_size || off + len > src->i_size)
+       if (off + len > src->i_size || off + len < off)
                goto out_unlock;
        if (len == 0)
                olen = len = src->i_size - off;
@@ -1578,6 +1578,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        u64 disko = 0, diskl = 0;
                        u64 datao = 0, datal = 0;
                        u8 comp;
+                       u64 endoff;
 
                        size = btrfs_item_size_nr(leaf, slot);
                        read_extent_buffer(leaf, buf,
@@ -1712,9 +1713,18 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        btrfs_release_path(root, path);
 
                        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-                       if (new_key.offset + datal > inode->i_size)
-                               btrfs_i_size_write(inode,
-                                                  new_key.offset + datal);
+
+                       /*
+                        * we round up to the block size at eof when
+                        * determining which extents to clone above,
+                        * but shouldn't round up the file size
+                        */
+                       endoff = new_key.offset + datal;
+                       if (endoff > off + olen)
+                               endoff = off + olen;
+                       if (endoff > inode->i_size)
+                               btrfs_i_size_write(inode, endoff);
+
                        BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
                        ret = btrfs_update_inode(trans, root, inode);
                        BUG_ON(ret);
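
The new range test also catches u64 wrap-around: for unsigned arithmetic, off + len < off holds exactly when the addition overflowed. A runnable check of both clauses:

#include <stdio.h>
#include <stdint.h>

static int range_ok(uint64_t off, uint64_t len, uint64_t i_size)
{
        return !(off + len > i_size || off + len < off);
}

int main(void)
{
        uint64_t i_size = 1 << 20;

        printf("%d\n", range_ok(0, 4096, i_size));              /* 1: fine */
        printf("%d\n", range_ok(UINT64_MAX - 1, 4096, i_size)); /* 0: wraps */
        return 0;
}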
index 3fe49042d8adaffe6b0f18de5d1b1fd63def729f..6d44053ecff1f08af7bee8546a0435ac55a0e1e2 100644 (file)
@@ -613,6 +613,9 @@ static void ceph_x_destroy(struct ceph_auth_client *ac)
                remove_ticket_handler(ac, th);
        }
 
+       if (xi->auth_authorizer.buf)
+               ceph_buffer_put(xi->auth_authorizer.buf);
+
        kfree(ac->private);
        ac->private = NULL;
 }
index 3ab79f6c4ce8808fa7c72a66adc23dcf0fcfc97d..416c08d315db52a409e85cc094588d3b5bfceed5 100644 (file)
@@ -1514,6 +1514,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        ceph_encode_filepath(&p, end, ino1, path1);
        ceph_encode_filepath(&p, end, ino2, path2);
 
+       /* make note of release offset, in case we need to replay */
+       req->r_request_release_offset = p - msg->front.iov_base;
+
        /* cap releases */
        releases = 0;
        if (req->r_inode_drop)
@@ -1580,6 +1583,32 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
        dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
             req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
 
+       if (req->r_got_unsafe) {
+               /*
+                * Replay.  Do not regenerate message (and rebuild
+                * paths, etc.); just use the original message.
+                * Rebuilding paths will break for renames because
+                * d_move mangles the src name.
+                */
+               msg = req->r_request;
+               rhead = msg->front.iov_base;
+
+               flags = le32_to_cpu(rhead->flags);
+               flags |= CEPH_MDS_FLAG_REPLAY;
+               rhead->flags = cpu_to_le32(flags);
+
+               if (req->r_target_inode)
+                       rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
+
+               rhead->num_retry = req->r_attempts - 1;
+
+               /* remove cap/dentry releases from message */
+               rhead->num_releases = 0;
+               msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset);
+               msg->front.iov_len = req->r_request_release_offset;
+               return 0;
+       }
+
        if (req->r_request) {
                ceph_msg_put(req->r_request);
                req->r_request = NULL;
@@ -1601,13 +1630,9 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
        rhead->flags = cpu_to_le32(flags);
        rhead->num_fwd = req->r_num_fwd;
        rhead->num_retry = req->r_attempts - 1;
+       rhead->ino = 0;
 
        dout(" r_locked_dir = %p\n", req->r_locked_dir);
-
-       if (req->r_target_inode && req->r_got_unsafe)
-               rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
-       else
-               rhead->ino = 0;
        return 0;
 }
 
index b292fa42a66d8026b49a0217ffc0b4e89f381497..952410c60d093e7ba5c39ed630c6283664d90c5a 100644 (file)
@@ -188,6 +188,7 @@ struct ceph_mds_request {
        int r_old_inode_drop, r_old_inode_unless;
 
        struct ceph_msg  *r_request;  /* original request */
+       int r_request_release_offset;
        struct ceph_msg  *r_reply;
        struct ceph_mds_reply_info_parsed r_reply_info;
        int r_err;
index 9ad43a310a415595e338b3a45187131d31fca8cd..15167b2daa5562c3a093ba03fa14932657718551 100644 (file)
@@ -43,7 +43,8 @@ static void ceph_fault(struct ceph_connection *con);
  * nicely render a sockaddr as a string.
  */
 #define MAX_ADDR_STR 20
-static char addr_str[MAX_ADDR_STR][40];
+#define MAX_ADDR_STR_LEN 60
+static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
 static DEFINE_SPINLOCK(addr_str_lock);
 static int last_addr_str;
 
@@ -52,7 +53,6 @@ const char *pr_addr(const struct sockaddr_storage *ss)
        int i;
        char *s;
        struct sockaddr_in *in4 = (void *)ss;
-       unsigned char *quad = (void *)&in4->sin_addr.s_addr;
        struct sockaddr_in6 *in6 = (void *)ss;
 
        spin_lock(&addr_str_lock);
@@ -64,25 +64,13 @@ const char *pr_addr(const struct sockaddr_storage *ss)
 
        switch (ss->ss_family) {
        case AF_INET:
-               sprintf(s, "%u.%u.%u.%u:%u",
-                       (unsigned int)quad[0],
-                       (unsigned int)quad[1],
-                       (unsigned int)quad[2],
-                       (unsigned int)quad[3],
-                       (unsigned int)ntohs(in4->sin_port));
+               snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
+                        (unsigned int)ntohs(in4->sin_port));
                break;
 
        case AF_INET6:
-               sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
-                       in6->sin6_addr.s6_addr16[0],
-                       in6->sin6_addr.s6_addr16[1],
-                       in6->sin6_addr.s6_addr16[2],
-                       in6->sin6_addr.s6_addr16[3],
-                       in6->sin6_addr.s6_addr16[4],
-                       in6->sin6_addr.s6_addr16[5],
-                       in6->sin6_addr.s6_addr16[6],
-                       in6->sin6_addr.s6_addr16[7],
-                       (unsigned int)ntohs(in6->sin6_port));
+               snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
+                        (unsigned int)ntohs(in6->sin6_port));
                break;
 
        default:
@@ -215,12 +203,13 @@ static void set_sock_callbacks(struct socket *sock,
  */
 static struct socket *ceph_tcp_connect(struct ceph_connection *con)
 {
-       struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
+       struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
        struct socket *sock;
        int ret;
 
        BUG_ON(con->sock);
-       ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+       ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
+                              IPPROTO_TCP, &sock);
        if (ret)
                return ERR_PTR(ret);
        con->sock = sock;
@@ -234,7 +223,8 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
 
        dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
 
-       ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
+       ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
+                                O_NONBLOCK);
        if (ret == -EINPROGRESS) {
                dout("connect %s EINPROGRESS sk_state = %u\n",
                     pr_addr(&con->peer_addr.in_addr),
@@ -1009,19 +999,32 @@ int ceph_parse_ips(const char *c, const char *end,
                struct sockaddr_in *in4 = (void *)ss;
                struct sockaddr_in6 *in6 = (void *)ss;
                int port;
+               char delim = ',';
+
+               if (*p == '[') {
+                       delim = ']';
+                       p++;
+               }
 
                memset(ss, 0, sizeof(*ss));
                if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
-                            ',', &ipend)) {
+                            delim, &ipend))
                        ss->ss_family = AF_INET;
-               } else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
-                                   ',', &ipend)) {
+               else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
+                                 delim, &ipend))
                        ss->ss_family = AF_INET6;
-               } else {
+               else
                        goto bad;
-               }
                p = ipend;
 
+               if (delim == ']') {
+                       if (*p != ']') {
+                               dout("missing matching ']'\n");
+                               goto bad;
+                       }
+                       p++;
+               }
+
                /* port? */
                if (p < end && *p == ':') {
                        port = 0;
@@ -1055,7 +1058,7 @@ int ceph_parse_ips(const char *c, const char *end,
        return 0;
 
 bad:
-       pr_err("parse_ips bad ip '%s'\n", c);
+       pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
        return -EINVAL;
 }
 
@@ -2015,20 +2018,20 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
 {
        mutex_lock(&con->mutex);
        if (!list_empty(&msg->list_head)) {
-               dout("con_revoke %p msg %p\n", con, msg);
+               dout("con_revoke %p msg %p - was on queue\n", con, msg);
                list_del_init(&msg->list_head);
                ceph_msg_put(msg);
                msg->hdr.seq = 0;
-               if (con->out_msg == msg) {
-                       ceph_msg_put(con->out_msg);
-                       con->out_msg = NULL;
-               }
+       }
+       if (con->out_msg == msg) {
+               dout("con_revoke %p msg %p - was sending\n", con, msg);
+               con->out_msg = NULL;
                if (con->out_kvec_is_msg) {
                        con->out_skip = con->out_kvec_bytes;
                        con->out_kvec_is_msg = false;
                }
-       } else {
-               dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
+               ceph_msg_put(msg);
+               msg->hdr.seq = 0;
        }
        mutex_unlock(&con->mutex);
 }
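
The parser change teaches ceph_parse_ips() the bracketed [addr]:port form used for IPv6. A userspace sketch of the same delimiter logic, with inet_pton() standing in for the kernel's in4_pton()/in6_pton() (names and buffer sizes are illustrative):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <arpa/inet.h>

static int parse_one(const char *p)
{
        char host[64];
        const char *end;
        int port = 0, v6 = 0;

        if (*p == '[') {                  /* [v6addr]:port form */
                v6 = 1;
                p++;
                end = strchr(p, ']');
                if (!end)
                        return -1;        /* missing matching ']' */
        } else {
                end = strchr(p, ':');
                if (!end)
                        end = p + strlen(p);
        }
        snprintf(host, sizeof(host), "%.*s", (int)(end - p), p);

        if (v6)
                end++;                    /* skip ']' */
        if (*end == ':')
                port = atoi(end + 1);

        struct in6_addr a6;
        struct in_addr a4;
        if (v6 ? inet_pton(AF_INET6, host, &a6) != 1
               : inet_pton(AF_INET, host, &a4) != 1)
                return -1;
        printf("%s addr '%s' port %d\n", v6 ? "v6" : "v4", host, port);
        return 0;
}

int main(void)
{
        parse_one("[2001:db8::1]:6789");
        parse_one("192.0.2.1:6789");
        return 0;
}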
index 50ce64ebd3301eb24cf358ca7d6c1cdb6c0fe444..277f8b33957757ef506f6ac18816699b6b8156da 100644 (file)
@@ -568,6 +568,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);
+                       kfree(pi);
                        goto bad;
                }
                __decode_pool(p, pi);
index c8c78ba078271163f567c26045afff6cf0b5cd71..86d4db15473e51b3b95fa4103eedf28227cc336a 100644 (file)
@@ -896,7 +896,7 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  *
 * In this case we return -1 to tell the caller that we bailed.
  */
-static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
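
This prototype change — a struct shrinker * as the first argument — repeats across gfs2, the inode cache, mbcache and NFS below. A minimal, hedged sketch of a shrinker under the updated API (the callback body and my_cache_count() are invented for the example):

/* Sketch only: the shape of a shrinker after this API change. */
static int my_cache_shrink(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;      /* can't recurse into the FS */
                /* ... free up to nr cached objects ... */
        }
        return my_cache_count();        /* objects still reclaimable */
}

static struct shrinker my_shrinker = {
        .shrink = my_cache_shrink,
        .seeks  = DEFAULT_SEEKS,
};
/* paired register_shrinker(&my_shrinker) / unregister_shrinker(...) */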
index 4a48c0f4b40275bce81f5c4b5fb40784a45a00bc..84da64b551b2c1170f4e68550ead612ed6fd7b41 100644 (file)
@@ -1041,6 +1041,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
 
        if (gfs2_is_stuffed(ip)) {
                u64 dsize = size + sizeof(struct gfs2_inode);
+               ip->i_disksize = size;
                ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
index 8295c5b5d4a9591e5d112d88ec99a8fd50bcc4d7..26ca3361a8bcc3525758ed00c6186fb62494a6e3 100644 (file)
@@ -392,7 +392,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
        unsigned totlen = be16_to_cpu(dent->de_rec_len);
 
        if (gfs2_dirent_sentinel(dent))
-               actual = GFS2_DIRENT_SIZE(0);
+               actual = 0;
        if (totlen - actual >= required)
                return 1;
        return 0;
index ddcdbf4935366b4eac1a85a74590f98c3a4b0c62..0898f3ec8212e5599df8c547d7d106aeb15d6c86 100644 (file)
@@ -706,8 +706,18 @@ static void glock_work_func(struct work_struct *work)
 {
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+       struct gfs2_holder *gh;
        int drop_ref = 0;
 
+       if (unlikely(test_bit(GLF_FROZEN, &gl->gl_flags))) {
+               spin_lock(&gl->gl_spin);
+               gh = find_first_waiter(gl);
+               if (gh && (gh->gh_flags & LM_FLAG_NOEXP) &&
+                   test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
+                       set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+               spin_unlock(&gl->gl_spin);
+       }
+
        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
                finish_xmote(gl, gl->gl_reply);
                drop_ref = 1;
@@ -1348,7 +1358,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 }
 
 
-static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
+static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        struct gfs2_glock *gl;
        int may_demote;
index b5612cbb62a51b75dc97699490658ce6939a4118..f03afd9c44bc748b51811bff46ccdab3d66fa619 100644 (file)
@@ -169,7 +169,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
 {
        struct inode *inode;
        struct gfs2_inode *ip;
-       struct gfs2_glock *io_gl;
+       struct gfs2_glock *io_gl = NULL;
        int error;
 
        inode = gfs2_iget(sb, no_addr);
@@ -198,6 +198,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
                ip->i_iopen_gh.gh_gl->gl_object = ip;
 
                gfs2_glock_put(io_gl);
+               io_gl = NULL;
 
                if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
                        goto gfs2_nfsbypass;
@@ -228,7 +229,8 @@ gfs2_nfsbypass:
 fail_glock:
        gfs2_glock_dq(&ip->i_iopen_gh);
 fail_iopen:
-       gfs2_glock_put(io_gl);
+       if (io_gl)
+               gfs2_glock_put(io_gl);
 fail_put:
        if (inode->i_state & I_NEW)
                ip->i_gl->gl_object = NULL;
@@ -256,7 +258,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
 {
        struct gfs2_sbd *sdp;
        struct gfs2_inode *ip;
-       struct gfs2_glock *io_gl;
+       struct gfs2_glock *io_gl = NULL;
        int error;
        struct gfs2_holder gh;
        struct inode *inode;
@@ -293,6 +295,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
 
        ip->i_iopen_gh.gh_gl->gl_object = ip;
        gfs2_glock_put(io_gl);
+       io_gl = NULL;
 
        inode->i_mode = DT2IF(DT_UNKNOWN);
 
@@ -319,7 +322,8 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
 fail_glock:
        gfs2_glock_dq(&ip->i_iopen_gh);
 fail_iopen:
-       gfs2_glock_put(io_gl);
+       if (io_gl)
+               gfs2_glock_put(io_gl);
 fail_put:
        ip->i_gl->gl_object = NULL;
        gfs2_glock_put(ip->i_gl);
index 49667d68769ef8fe5a82920a79f1a37df216804b..8f02d3db8f428fa345062fa50581f1a35c420a0e 100644 (file)
@@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(qd_lru_lock);
 
-int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
+int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        struct gfs2_quota_data *qd;
        struct gfs2_sbd *sdp;
@@ -694,10 +694,8 @@ get_a_page:
                if (!buffer_mapped(bh))
                        goto unlock_out;
                /* If it's a newly allocated disk block for quota, zero it */
-               if (buffer_new(bh)) {
-                       memset(bh->b_data, 0, bh->b_size);
-                       set_buffer_uptodate(bh);
-               }
+               if (buffer_new(bh))
+                       zero_user(page, pos - blocksize, bh->b_size);
        }
 
        if (PageUptodate(page))
@@ -723,7 +721,7 @@ get_a_page:
 
        /* If quota straddles page boundary, we need to update the rest of the
         * quota at the beginning of the next page */
-       if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */
+       if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
                ptr = ptr + nbytes;
                nbytes = sizeof(struct gfs2_quota) - nbytes;
                offset = 0;
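
The corrected condition tests straddling directly: the record spills onto the next page only when offset plus record size passes PAGE_CACHE_SIZE. A quick runnable check of that arithmetic (sizes here are illustrative stand-ins):

#include <stdio.h>

#define PAGE_SIZE_DEMO   4096u
#define QUOTA_SIZE_DEMO  88u    /* placeholder for sizeof(struct gfs2_quota) */

int main(void)
{
        unsigned offsets[] = { 0, 4000, 4040 };

        for (int i = 0; i < 3; i++) {
                unsigned off = offsets[i];
                int straddles = (off + QUOTA_SIZE_DEMO) > PAGE_SIZE_DEMO;
                printf("offset %4u: %s\n", off,
                       straddles ? "straddles page boundary" : "fits in page");
        }
        return 0;
}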
index 195f60c8bd144b6ae5040c878e4f14305cafccc4..e7d236ca48bd28df49f8555fbaa9e1b90460fbd0 100644 (file)
@@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
        return ret;
 }
 
-extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
+extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask);
 extern const struct quotactl_ops gfs2_quotactl_ops;
 
 #endif /* __QUOTA_DOT_H__ */
index 2bee20ae3d65ba74f970129ec8d147ab7cd4fe50..722860b323a9b40e0dc9f8b2e27e68563eb293c1 100644 (file)
@@ -512,7 +512,7 @@ static void prune_icache(int nr_to_scan)
  * This function is passed the number of inodes to scan, and it returns the
  * total number of remaining possibly-reclaimable inodes.
  */
-static int shrink_icache_memory(int nr, gfp_t gfp_mask)
+static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        if (nr) {
                /*
index bc2ff5932769199f271db9055f7881b2e9c4bf54..036880895bfc8c2e99c42f6fd900819315bd508a 100644 (file)
@@ -297,7 +297,6 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
        struct page *new_page;
        unsigned int new_offset;
        struct buffer_head *bh_in = jh2bh(jh_in);
-       struct jbd2_buffer_trigger_type *triggers;
        journal_t *journal = transaction->t_journal;
 
        /*
@@ -328,21 +327,21 @@ repeat:
                done_copy_out = 1;
                new_page = virt_to_page(jh_in->b_frozen_data);
                new_offset = offset_in_page(jh_in->b_frozen_data);
-               triggers = jh_in->b_frozen_triggers;
        } else {
                new_page = jh2bh(jh_in)->b_page;
                new_offset = offset_in_page(jh2bh(jh_in)->b_data);
-               triggers = jh_in->b_triggers;
        }
 
        mapped_data = kmap_atomic(new_page, KM_USER0);
        /*
-        * Fire any commit trigger.  Do this before checking for escaping,
-        * as the trigger may modify the magic offset.  If a copy-out
-        * happens afterwards, it will have the correct data in the buffer.
+        * Fire the data frozen trigger if the data wasn't already frozen.  Do this
+        * before checking for escaping, as the trigger may modify the magic
+        * offset.  If a copy-out happens afterwards, it will have the correct
+        * data in the buffer.
         */
-       jbd2_buffer_commit_trigger(jh_in, mapped_data + new_offset,
-                                  triggers);
+       if (!done_copy_out)
+               jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
+                                          jh_in->b_triggers);
 
        /*
         * Check for escaping
index e214d68620ac167fb5ddeb71775ead1b6063a571..b8e0806681bb0f4acf63964fa3f72ae00e1f8900 100644 (file)
@@ -725,6 +725,9 @@ done:
                page = jh2bh(jh)->b_page;
                offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
                source = kmap_atomic(page, KM_USER0);
+               /* Fire data frozen trigger just before we copy the data */
+               jbd2_buffer_frozen_trigger(jh, source + offset,
+                                          jh->b_triggers);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
                kunmap_atomic(source, KM_USER0);
 
@@ -963,15 +966,15 @@ void jbd2_journal_set_triggers(struct buffer_head *bh,
        jh->b_triggers = type;
 }
 
-void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data,
+void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
 {
        struct buffer_head *bh = jh2bh(jh);
 
-       if (!triggers || !triggers->t_commit)
+       if (!triggers || !triggers->t_frozen)
                return;
 
-       triggers->t_commit(triggers, bh, mapped_data, bh->b_size);
+       triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
 }
 
 void jbd2_buffer_abort_trigger(struct journal_head *jh,
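
With the rename, the trigger fires when data is frozen (copied out, or mapped just before writeout) rather than at commit. A hedged sketch of a trigger using the new hook; the checksum idea and names are invented, only the t_frozen signature and jbd2_journal_set_triggers() come from the API:

/* Sketch: a trigger that recomputes a checksum just before the
 * buffer contents are frozen for the journal. */
static void my_frozen(struct jbd2_buffer_trigger_type *triggers,
                      struct buffer_head *bh, void *mapped_data,
                      size_t size)
{
        /* update a checksum stored inside mapped_data here */
}

static struct jbd2_buffer_trigger_type my_triggers = {
        .t_frozen = my_frozen,
};
/* attach with jbd2_journal_set_triggers(bh, &my_triggers); */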
index ec88ff3d04a9194a4b595701e917ef784b8bdc2a..e28f21b95344378aa09ce95771481711870be902 100644 (file)
@@ -115,7 +115,7 @@ mb_cache_indexes(struct mb_cache *cache)
  * What the mbcache registers as to get shrunk dynamically.
  */
 
-static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
 
 static struct shrinker mb_cache_shrinker = {
        .shrink = mb_cache_shrink_fn,
@@ -191,13 +191,14 @@ forget:
  * This function is called by the kernel memory management when memory
  * gets low.
  *
+ * @shrink: (ignored)
  * @nr_to_scan: Number of objects to scan
  * @gfp_mask: (ignored)
  *
  * Returns the number of objects which are present in the cache.
  */
 static int
-mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
+mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;
index 782b431ef91c9f521f18d5eeba5a33fbceb5b612..e60416d3f8188b548ec1d55e3438ea7e6f2c926f 100644 (file)
@@ -1710,7 +1710,7 @@ static void nfs_access_free_list(struct list_head *head)
        }
 }
 
-int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        LIST_HEAD(head);
        struct nfs_inode *nfsi;
index d8bd619e386c29a47224463b9a30a156bd76c6a8..e70f44b9b3f43ff5a837435e886fd73f2372a20c 100644 (file)
@@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[];
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
 
 /* dir.c */
-extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
+extern int nfs_access_cache_shrinker(struct shrinker *shrink,
+                                       int nr_to_scan, gfp_t gfp_mask);
 
 /* inode.c */
 extern struct workqueue_struct *nfsiod_workqueue;
index 3623ca20cc186046cfbf840f03ce9705146b17ba..356e976772bf1adb112aaf3c8b28ddb71b639afb 100644 (file)
@@ -196,15 +196,14 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
                        dump_stack();
                        goto bail;
                }
-
-               past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
-               mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
-                    (unsigned long long)past_eof);
-
-               if (create && (iblock >= past_eof))
-                       set_buffer_new(bh_result);
        }
 
+       past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
+       mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
+            (unsigned long long)past_eof);
+       if (create && (iblock >= past_eof))
+               set_buffer_new(bh_result);
+
 bail:
        if (err < 0)
                err = -EIO;
@@ -459,36 +458,6 @@ int walk_page_buffers(     handle_t *handle,
        return ret;
 }
 
-handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
-                                                        struct page *page,
-                                                        unsigned from,
-                                                        unsigned to)
-{
-       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-       handle_t *handle;
-       int ret = 0;
-
-       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
-       if (IS_ERR(handle)) {
-               ret = -ENOMEM;
-               mlog_errno(ret);
-               goto out;
-       }
-
-       if (ocfs2_should_order_data(inode)) {
-               ret = ocfs2_jbd2_file_inode(handle, inode);
-               if (ret < 0)
-                       mlog_errno(ret);
-       }
-out:
-       if (ret) {
-               if (!IS_ERR(handle))
-                       ocfs2_commit_trans(osb, handle);
-               handle = ERR_PTR(ret);
-       }
-       return handle;
-}
-
 static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
 {
        sector_t status;
@@ -1131,23 +1100,37 @@ out:
  */
 static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                                      struct ocfs2_write_ctxt *wc,
-                                     u32 cpos, loff_t user_pos, int new,
+                                     u32 cpos, loff_t user_pos,
+                                     unsigned user_len, int new,
                                      struct page *mmap_page)
 {
        int ret = 0, i;
-       unsigned long start, target_index, index;
+       unsigned long start, target_index, end_index, index;
        struct inode *inode = mapping->host;
+       loff_t last_byte;
 
        target_index = user_pos >> PAGE_CACHE_SHIFT;
 
        /*
         * Figure out how many pages we'll be manipulating here. For
         * non allocating write, we just change the one
-        * page. Otherwise, we'll need a whole clusters worth.
+        * page. Otherwise, we'll need a whole cluster's worth.  If we're
+        * writing past i_size, we only need enough pages to cover the
+        * last page of the write.
         */
        if (new) {
                wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
                start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
+               /*
+                * We need the index *past* the last page we could possibly
+                * touch.  This is the page past the end of the write or
+                * i_size, whichever is greater.
+                */
+               last_byte = max(user_pos + user_len, i_size_read(inode));
+               BUG_ON(last_byte < 1);
+               end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
+               if ((start + wc->w_num_pages) > end_index)
+                       wc->w_num_pages = end_index - start;
        } else {
                wc->w_num_pages = 1;
                start = target_index;
@@ -1620,21 +1603,20 @@ out:
  * write path can treat it as an non-allocating write, which has no
  * special case code for sparse/nonsparse files.
  */
-static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
-                                       unsigned len,
+static int ocfs2_expand_nonsparse_inode(struct inode *inode,
+                                       struct buffer_head *di_bh,
+                                       loff_t pos, unsigned len,
                                        struct ocfs2_write_ctxt *wc)
 {
        int ret;
-       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        loff_t newsize = pos + len;
 
-       if (ocfs2_sparse_alloc(osb))
-               return 0;
+       BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
 
        if (newsize <= i_size_read(inode))
                return 0;
 
-       ret = ocfs2_extend_no_holes(inode, newsize, pos);
+       ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
        if (ret)
                mlog_errno(ret);
 
@@ -1644,6 +1626,18 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
        return ret;
 }
 
+static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
+                          loff_t pos)
+{
+       int ret = 0;
+
+       BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
+       if (pos > i_size_read(inode))
+               ret = ocfs2_zero_extend(inode, di_bh, pos);
+
+       return ret;
+}
+
 int ocfs2_write_begin_nolock(struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata,
@@ -1679,7 +1673,11 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
                }
        }
 
-       ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
+       if (ocfs2_sparse_alloc(osb))
+               ret = ocfs2_zero_tail(inode, di_bh, pos);
+       else
+               ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
+                                                  wc);
        if (ret) {
                mlog_errno(ret);
                goto out;
@@ -1789,7 +1787,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
         * that we can zero and flush if we error after adding the
         * extent.
         */
-       ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
+       ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
                                         cluster_of_pages, mmap_page);
        if (ret) {
                mlog_errno(ret);
index 6b5a492e1749f8063097a1ebef4e3ef9ad53fdb5..153abb5abef024d2ca63f6d4a23ee1c126b89f13 100644 (file)
@@ -1671,7 +1671,7 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
        struct dlm_ctxt *dlm = NULL;
        struct dlm_ctxt *new_ctxt = NULL;
 
-       if (strlen(domain) > O2NM_MAX_NAME_LEN) {
+       if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
                ret = -ENAMETOOLONG;
                mlog(ML_ERROR, "domain name length too long\n");
                goto leave;
@@ -1709,6 +1709,7 @@ retry:
                }
 
                if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
+                       spin_unlock(&dlm_domain_lock);
                        mlog(ML_ERROR,
                             "Requested locking protocol version is not "
                             "compatible with already registered domain "
index 4a7506a4e314c34014fd58ab739e836f9babe4e3..94b97fc6a88e62522b1958ed6a89de5e6803525b 100644 (file)
@@ -2808,14 +2808,8 @@ again:
                mlog(0, "trying again...\n");
                goto again;
        }
-       /* now that we are sure the MIGRATING state is there, drop
-        * the unneded state which blocked threads trying to DIRTY */
-       spin_lock(&res->spinlock);
-       BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
-       BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
-       res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
-       spin_unlock(&res->spinlock);
 
+       ret = 0;
        /* did the target go down or die? */
        spin_lock(&dlm->spinlock);
        if (!test_bit(target, dlm->domain_map)) {
@@ -2825,10 +2819,22 @@ again:
        }
        spin_unlock(&dlm->spinlock);
 
+       /*
+        * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
+        * another try; otherwise, we are sure the MIGRATING state is there,
+        * drop the unneeded state which blocked threads trying to DIRTY
+        */
+       spin_lock(&res->spinlock);
+       BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+       res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+       if (!ret)
+               BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+       spin_unlock(&res->spinlock);
+
        /*
         * at this point:
         *
-        *   o the DLM_LOCK_RES_MIGRATING flag is set
+        *   o the DLM_LOCK_RES_MIGRATING flag is set if the target is not down
         *   o there are no pending asts on this lockres
         *   o all processes trying to reserve an ast on this
         *     lockres must wait for the MIGRATING flag to clear
index f8b75ce4be7019ab20d9c5f40d1567a4e520b8fd..9dfaac73b36da4350cc5a2f2dc7a34918a21e11e 100644 (file)
@@ -463,7 +463,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                int bit;
 
-               bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
+               bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES || bit < 0)
                        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
                else
index 6a13ea64c44773fc239ad55a5237422ddee18e94..2b10b36d15772efcae056a62b4df1a9fe8aa2c53 100644 (file)
@@ -724,28 +724,55 @@ leave:
        return status;
 }
 
+/*
+ * While a write will already be ordering the data, a truncate will not.
+ * Thus, we need to explicitly order the zeroed pages.
+ */
+static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
+{
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       handle_t *handle = NULL;
+       int ret = 0;
+
+       if (!ocfs2_should_order_data(inode))
+               goto out;
+
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+       if (IS_ERR(handle)) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_jbd2_file_inode(handle, inode);
+       if (ret < 0)
+               mlog_errno(ret);
+
+out:
+       if (ret) {
+               if (!IS_ERR(handle))
+                       ocfs2_commit_trans(osb, handle);
+               handle = ERR_PTR(ret);
+       }
+       return handle;
+}
+
 /* Some parts of this taken from generic_cont_expand, which turned out
  * to be too fragile to do exactly what we need without us having to
  * worry about recursive locking in ->write_begin() and ->write_end(). */
-static int ocfs2_write_zero_page(struct inode *inode,
-                                u64 size)
+static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
+                                u64 abs_to)
 {
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
-       unsigned long index;
-       unsigned int offset;
+       unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
        handle_t *handle = NULL;
-       int ret;
+       int ret = 0;
+       unsigned zero_from, zero_to, block_start, block_end;
 
-       offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-       /* ugh.  in prepare/commit_write, if from==to==start of block, we
-       ** skip the prepare.  make sure we never send an offset for the start
-       ** of a block
-       */
-       if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
-               offset++;
-       }
-       index = size >> PAGE_CACHE_SHIFT;
+       BUG_ON(abs_from >= abs_to);
+       BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
+       BUG_ON(abs_from & (inode->i_blkbits - 1));
 
        page = grab_cache_page(mapping, index);
        if (!page) {
@@ -754,31 +781,56 @@ static int ocfs2_write_zero_page(struct inode *inode,
                goto out;
        }
 
-       ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
-       if (ret < 0) {
-               mlog_errno(ret);
-               goto out_unlock;
-       }
+       /* Get the offsets within the page that we want to zero */
+       zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
+       zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
+       if (!zero_to)
+               zero_to = PAGE_CACHE_SIZE;
 
-       if (ocfs2_should_order_data(inode)) {
-               handle = ocfs2_start_walk_page_trans(inode, page, offset,
-                                                    offset);
-               if (IS_ERR(handle)) {
-                       ret = PTR_ERR(handle);
-                       handle = NULL;
+       mlog(0,
+            "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n",
+            (unsigned long long)abs_from, (unsigned long long)abs_to,
+            index, zero_from, zero_to);
+
+       /* We know that zero_from is block aligned */
+       for (block_start = zero_from; block_start < zero_to;
+            block_start = block_end) {
+               block_end = block_start + (1 << inode->i_blkbits);
+
+               /*
+                * block_start is block-aligned.  Bump it by one to
+                * force ocfs2_{prepare,commit}_write() to zero the
+                * whole block.
+                */
+               ret = ocfs2_prepare_write_nolock(inode, page,
+                                                block_start + 1,
+                                                block_start + 1);
+               if (ret < 0) {
+                       mlog_errno(ret);
                        goto out_unlock;
                }
-       }
 
-       /* must not update i_size! */
-       ret = block_commit_write(page, offset, offset);
-       if (ret < 0)
-               mlog_errno(ret);
-       else
-               ret = 0;
+               if (!handle) {
+                       handle = ocfs2_zero_start_ordered_transaction(inode);
+                       if (IS_ERR(handle)) {
+                               ret = PTR_ERR(handle);
+                               handle = NULL;
+                               break;
+                       }
+               }
+
+               /* must not update i_size! */
+               ret = block_commit_write(page, block_start + 1,
+                                        block_start + 1);
+               if (ret < 0)
+                       mlog_errno(ret);
+               else
+                       ret = 0;
+       }
 
        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+
 out_unlock:
        unlock_page(page);
        page_cache_release(page);
@@ -786,22 +838,114 @@ out:
        return ret;
 }
 
-static int ocfs2_zero_extend(struct inode *inode,
-                            u64 zero_to_size)
+/*
+ * Find the next range to zero.  We do this in terms of bytes because
+ * that's what ocfs2_zero_extend() wants, and it is dealing with the
+ * pagecache.  We may return multiple extents.
+ *
+ * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
+ * needs to be zeroed.  range_start and range_end return the next zeroing
+ * range.  A subsequent call should pass the previous range_end as its
+ * zero_start.  If range_end is 0, there's nothing to do.
+ *
+ * Unwritten extents are skipped over.  Refcounted extents are CoWed.
+ */
+static int ocfs2_zero_extend_get_range(struct inode *inode,
+                                      struct buffer_head *di_bh,
+                                      u64 zero_start, u64 zero_end,
+                                      u64 *range_start, u64 *range_end)
 {
-       int ret = 0;
-       u64 start_off;
-       struct super_block *sb = inode->i_sb;
+       int rc = 0, needs_cow = 0;
+       u32 p_cpos, zero_clusters = 0;
+       u32 zero_cpos =
+               zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+       u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
+       unsigned int num_clusters = 0;
+       unsigned int ext_flags = 0;
 
-       start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
-       while (start_off < zero_to_size) {
-               ret = ocfs2_write_zero_page(inode, start_off);
-               if (ret < 0) {
-                       mlog_errno(ret);
+       while (zero_cpos < last_cpos) {
+               rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
+                                       &num_clusters, &ext_flags);
+               if (rc) {
+                       mlog_errno(rc);
+                       goto out;
+               }
+
+               if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+                       zero_clusters = num_clusters;
+                       if (ext_flags & OCFS2_EXT_REFCOUNTED)
+                               needs_cow = 1;
+                       break;
+               }
+
+               zero_cpos += num_clusters;
+       }
+       if (!zero_clusters) {
+               *range_end = 0;
+               goto out;
+       }
+
+       while ((zero_cpos + zero_clusters) < last_cpos) {
+               rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
+                                       &p_cpos, &num_clusters,
+                                       &ext_flags);
+               if (rc) {
+                       mlog_errno(rc);
                        goto out;
                }
 
-               start_off += sb->s_blocksize;
+               if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
+                       break;
+               if (ext_flags & OCFS2_EXT_REFCOUNTED)
+                       needs_cow = 1;
+               zero_clusters += num_clusters;
+       }
+       if ((zero_cpos + zero_clusters) > last_cpos)
+               zero_clusters = last_cpos - zero_cpos;
+
+       if (needs_cow) {
+               rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, zero_clusters,
+                                       UINT_MAX);
+               if (rc) {
+                       mlog_errno(rc);
+                       goto out;
+               }
+       }
+
+       *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
+       *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
+                                            zero_cpos + zero_clusters);
+
+out:
+       return rc;
+}
+
+/*
+ * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
+ * has made sure that the entire range needs zeroing.
+ */
+static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
+                                  u64 range_end)
+{
+       int rc = 0;
+       u64 next_pos;
+       u64 zero_pos = range_start;
+
+       mlog(0, "range_start = %llu, range_end = %llu\n",
+            (unsigned long long)range_start,
+            (unsigned long long)range_end);
+       BUG_ON(range_start >= range_end);
+
+       while (zero_pos < range_end) {
+               next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
+               if (next_pos > range_end)
+                       next_pos = range_end;
+               rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
+               if (rc < 0) {
+                       mlog_errno(rc);
+                       break;
+               }
+               zero_pos = next_pos;
 
                /*
                 * Very large extends have the potential to lock up
@@ -810,16 +954,63 @@ static int ocfs2_zero_extend(struct inode *inode,
                cond_resched();
        }
 
-out:
+       return rc;
+}
+
+int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
+                     loff_t zero_to_size)
+{
+       int ret = 0;
+       u64 zero_start, range_start = 0, range_end = 0;
+       struct super_block *sb = inode->i_sb;
+
+       zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
+       mlog(0, "zero_start %llu for i_size %llu\n",
+            (unsigned long long)zero_start,
+            (unsigned long long)i_size_read(inode));
+       while (zero_start < zero_to_size) {
+               ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
+                                                 zero_to_size,
+                                                 &range_start,
+                                                 &range_end);
+               if (ret) {
+                       mlog_errno(ret);
+                       break;
+               }
+               if (!range_end)
+                       break;
+               /* Trim the ends */
+               if (range_start < zero_start)
+                       range_start = zero_start;
+               if (range_end > zero_to_size)
+                       range_end = zero_to_size;
+
+               ret = ocfs2_zero_extend_range(inode, range_start,
+                                             range_end);
+               if (ret) {
+                       mlog_errno(ret);
+                       break;
+               }
+               zero_start = range_end;
+       }
+
        return ret;
 }
 
-int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
+int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
+                         u64 new_i_size, u64 zero_to)
 {
        int ret;
        u32 clusters_to_add;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
 
+       /*
+        * Only quota files call this without a bh, and they can't be
+        * refcounted.
+        */
+       BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
+       BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
+
        clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
        if (clusters_to_add < oi->ip_clusters)
                clusters_to_add = 0;
@@ -840,7 +1031,7 @@ int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
         * still need to zero the area between the old i_size and the
         * new i_size.
         */
-       ret = ocfs2_zero_extend(inode, zero_to);
+       ret = ocfs2_zero_extend(inode, di_bh, zero_to);
        if (ret < 0)
                mlog_errno(ret);
 
@@ -862,27 +1053,15 @@ static int ocfs2_extend_file(struct inode *inode,
                goto out;
 
        if (i_size_read(inode) == new_i_size)
-               goto out;
+               goto out;
        BUG_ON(new_i_size < i_size_read(inode));
 
-       /*
-        * Fall through for converting inline data, even if the fs
-        * supports sparse files.
-        *
-        * The check for inline data here is legal - nobody can add
-        * the feature since we have i_mutex. We must check it again
-        * after acquiring ip_alloc_sem though, as paths like mmap
-        * might have raced us to converting the inode to extents.
-        */
-       if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
-           && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
-               goto out_update_size;
-
        /*
         * The alloc sem blocks people in read/write from reading our
         * allocation until we're done changing it. We depend on
         * i_mutex to block other extend/truncate calls while we're
-        * here.
+        * here.  We even have to hold it for sparse files because there
+        * might be some tail zeroing.
         */
        down_write(&oi->ip_alloc_sem);
 
@@ -899,14 +1078,16 @@ static int ocfs2_extend_file(struct inode *inode,
                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        up_write(&oi->ip_alloc_sem);
-
                        mlog_errno(ret);
                        goto out;
                }
        }
 
-       if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
-               ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
+       if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+               ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
+       else
+               ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
+                                           new_i_size);
 
        up_write(&oi->ip_alloc_sem);
 
index d66cf4f7c70e34bbec1d63eccd25096eeac7e976..97bf761c9e7c7b8f1c1744bbada1d778095eb3d7 100644 (file)
@@ -54,8 +54,10 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb,
 int ocfs2_simple_size_update(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size);
-int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size,
-                         u64 zero_to);
+int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
+                         u64 new_i_size, u64 zero_to);
+int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
+                     loff_t zero_to);
 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
 int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
                  struct kstat *stat);
index 47878cf164184cf910bcbbebc10da133aa2e0c64..625de9d7088cdf2c82008b2e875094ec2b43f1d0 100644 (file)
@@ -472,7 +472,7 @@ static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger
        return container_of(triggers, struct ocfs2_triggers, ot_triggers);
 }
 
-static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
 {
@@ -491,7 +491,7 @@ static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  * Quota blocks have their own trigger because the struct ocfs2_block_check
  * offset depends on the blocksize.
  */
-static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
 {
@@ -511,7 +511,7 @@ static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  * Directory blocks also have their own trigger because the
  * struct ocfs2_block_check offset depends on the blocksize.
  */
-static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
 {
@@ -544,7 +544,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
 
 static struct ocfs2_triggers di_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dinode, i_check),
@@ -552,7 +552,7 @@ static struct ocfs2_triggers di_triggers = {
 
 static struct ocfs2_triggers eb_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_extent_block, h_check),
@@ -560,7 +560,7 @@ static struct ocfs2_triggers eb_triggers = {
 
 static struct ocfs2_triggers rb_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_refcount_block, rf_check),
@@ -568,7 +568,7 @@ static struct ocfs2_triggers rb_triggers = {
 
 static struct ocfs2_triggers gd_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_group_desc, bg_check),
@@ -576,14 +576,14 @@ static struct ocfs2_triggers gd_triggers = {
 
 static struct ocfs2_triggers db_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_db_commit_trigger,
+               .t_frozen = ocfs2_db_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
 };
 
 static struct ocfs2_triggers xb_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_xattr_block, xb_check),
@@ -591,14 +591,14 @@ static struct ocfs2_triggers xb_triggers = {
 
 static struct ocfs2_triggers dq_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_dq_commit_trigger,
+               .t_frozen = ocfs2_dq_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
 };
 
 static struct ocfs2_triggers dr_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dx_root_block, dr_check),
@@ -606,7 +606,7 @@ static struct ocfs2_triggers dr_triggers = {
 
 static struct ocfs2_triggers dl_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dx_leaf, dl_check),
@@ -1936,7 +1936,7 @@ void ocfs2_orphan_scan_work(struct work_struct *work)
        mutex_lock(&os->os_lock);
        ocfs2_queue_orphan_scan(osb);
        if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
-               schedule_delayed_work(&os->os_orphan_scan_work,
+               queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
                                      ocfs2_orphan_scan_timeout());
        mutex_unlock(&os->os_lock);
 }
@@ -1976,8 +1976,8 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
                atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
        else {
                atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
-               schedule_delayed_work(&os->os_orphan_scan_work,
-                                     ocfs2_orphan_scan_timeout());
+               queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
+                                  ocfs2_orphan_scan_timeout());
        }
 }
 
index 3d7419682dc069da151ae265367f214e15ae31af..ec6adbf8f5515afbaa7e8ffcb8a32b6a3c4e1ea8 100644 (file)
@@ -118,6 +118,7 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
 {
        unsigned int la_mb;
        unsigned int gd_mb;
+       unsigned int la_max_mb;
        unsigned int megs_per_slot;
        struct super_block *sb = osb->sb;
 
@@ -182,6 +183,12 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
        if (megs_per_slot < la_mb)
                la_mb = megs_per_slot;
 
+       /* We can't store more bits than will fit in a block. */
+       la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
+                                               ocfs2_local_alloc_size(sb) * 8);
+       if (la_mb > la_max_mb)
+               la_mb = la_max_mb;
+
        return la_mb;
 }
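
The new cap is straightforward bitmap arithmetic: the local alloc bitmap must fit in one block, so it can track at most ocfs2_local_alloc_size(sb) * 8 clusters. A rough worked sketch under assumed sizes (the exact header overhead subtracted by ocfs2_local_alloc_size() is glossed over here):

    /* Hypothetical numbers for illustration only. */
    static unsigned int la_cap_mb_example(void)
    {
            unsigned int bitmap_bytes = 4096 - 256;  /* block minus assumed header */
            unsigned long long clusters = (unsigned long long)bitmap_bytes * 8;
            unsigned long long bytes = clusters << 12;  /* assumed 4KB clusters */

            return bytes >> 20;  /* 30720 clusters * 4KB = 120 MB cap */
    }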
 
index 2bb35fe00511e98f41f23fa9a93d752d3a82b4fe..4607923eb24c192ff3642042161684a158300908 100644 (file)
@@ -775,7 +775,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
-               status = ocfs2_extend_no_holes(gqinode,
+               status = ocfs2_extend_no_holes(gqinode, NULL,
                        gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
                        gqinode->i_size);
                if (status < 0)
index 8bd70d4d184d5827fdb861b7a565fc26d364f0a1..dc78764ccc4c6211bacee0a409f23606e24b3997 100644 (file)
@@ -971,7 +971,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
        u64 p_blkno;
 
        /* We are protected by dqio_sem so no locking needed */
-       status = ocfs2_extend_no_holes(lqinode,
+       status = ocfs2_extend_no_holes(lqinode, NULL,
                                       lqinode->i_size + 2 * sb->s_blocksize,
                                       lqinode->i_size);
        if (status < 0) {
@@ -1114,7 +1114,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
                return ocfs2_local_quota_add_chunk(sb, type, offset);
 
        /* We are protected by dqio_sem so no locking needed */
-       status = ocfs2_extend_no_holes(lqinode,
+       status = ocfs2_extend_no_holes(lqinode, NULL,
                                       lqinode->i_size + sb->s_blocksize,
                                       lqinode->i_size);
        if (status < 0) {
index 4793f36f6518b25f312274d2fcdc84492de26340..3ac5aa733e9c8018090bc2493d819937593760a0 100644 (file)
@@ -2931,6 +2931,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
        offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
        end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
+       /*
+        * We only duplicate pages until we reach the page that contains i_size - 1.
+        * So trim 'end' to i_size.
+        */
+       if (end > i_size_read(context->inode))
+               end = i_size_read(context->inode);
 
        while (offset < end) {
                page_index = offset >> PAGE_CACHE_SHIFT;
@@ -4166,6 +4172,12 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
        struct inode *inode = old_dentry->d_inode;
        struct buffer_head *new_bh = NULL;
 
+       if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+               ret = -EINVAL;
+               mlog_errno(ret);
+               goto out;
+       }
+
        ret = filemap_fdatawrite(inode->i_mapping);
        if (ret) {
                mlog_errno(ret);
index f4c2a9eb8c4d75a6354fb52c37ba93edb81bebd5..a8e6a95a353f03dcb8a34cf928ded84ff7e6d127 100644 (file)
@@ -741,7 +741,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
                     le16_to_cpu(bg->bg_free_bits_count));
        le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
                     le16_to_cpu(bg->bg_bits));
-       cl->cl_recs[alloc_rec].c_blkno  = cpu_to_le64(bg->bg_blkno);
+       cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
        if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
                le16_add_cpu(&cl->cl_next_free_rec, 1);
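
The one-line change above is an endianness fix: bg->bg_blkno is an on-disk, already little-endian value, so wrapping it in cpu_to_le64() swapped it a second time on big-endian hosts. A sketch of the type discipline involved, assuming kernel-style __le64 annotations (sparse catches exactly this class of bug):

    #include <linux/types.h>

    struct example_rec {
            __le64 c_blkno;                 /* on-disk, always little-endian */
    };

    static void set_blkno(struct example_rec *rec, __le64 bg_blkno)
    {
            rec->c_blkno = bg_blkno;        /* correct: value is already __le64 */
            /* rec->c_blkno = cpu_to_le64(bg_blkno);  wrong: byte-swaps an
             * already-swapped value on big-endian CPUs */
    }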
 
index e97b34842cfea0d188f85476a56c4a0eb0af5ce0..d03469f618012ea4aff64b62f1f77e3e8b8ec47d 100644 (file)
@@ -709,7 +709,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
                                         struct ocfs2_xattr_value_buf *vb,
                                         struct ocfs2_xattr_set_ctxt *ctxt)
 {
-       int status = 0;
+       int status = 0, credits;
        handle_t *handle = ctxt->handle;
        enum ocfs2_alloc_restarted why;
        u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
@@ -719,38 +719,54 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
 
        ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
 
-       status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
-                             OCFS2_JOURNAL_ACCESS_WRITE);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
+       while (clusters_to_add) {
+               status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
+                                      OCFS2_JOURNAL_ACCESS_WRITE);
+               if (status < 0) {
+                       mlog_errno(status);
+                       break;
+               }
 
-       prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
-       status = ocfs2_add_clusters_in_btree(handle,
-                                            &et,
-                                            &logical_start,
-                                            clusters_to_add,
-                                            0,
-                                            ctxt->data_ac,
-                                            ctxt->meta_ac,
-                                            &why);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
+               prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
+               status = ocfs2_add_clusters_in_btree(handle,
+                                                    &et,
+                                                    &logical_start,
+                                                    clusters_to_add,
+                                                    0,
+                                                    ctxt->data_ac,
+                                                    ctxt->meta_ac,
+                                                    &why);
+               if ((status < 0) && (status != -EAGAIN)) {
+                       if (status != -ENOSPC)
+                               mlog_errno(status);
+                       break;
+               }
 
-       ocfs2_journal_dirty(handle, vb->vb_bh);
+               ocfs2_journal_dirty(handle, vb->vb_bh);
 
-       clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;
+               clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) -
+                                        prev_clusters;
 
-       /*
-        * We should have already allocated enough space before the transaction,
-        * so no need to restart.
-        */
-       BUG_ON(why != RESTART_NONE || clusters_to_add);
-
-leave:
+               if (why != RESTART_NONE && clusters_to_add) {
+                       /*
+                        * We can only fail in case the alloc file doesn't give
+                        * up enough clusters.
+                        */
+                       BUG_ON(why == RESTART_META);
+
+                       mlog(0, "restarting xattr value extension for %u"
+                            " clusters,.\n", clusters_to_add);
+                       credits = ocfs2_calc_extend_credits(inode->i_sb,
+                                                           &vb->vb_xv->xr_list,
+                                                           clusters_to_add);
+                       status = ocfs2_extend_trans(handle, credits);
+                       if (status < 0) {
+                               status = -ENOMEM;
+                               mlog_errno(status);
+                               break;
+                       }
+               }
+       }
 
        return status;
 }
@@ -6788,16 +6804,15 @@ out:
        return ret;
 }
 
-static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+static int ocfs2_reflink_xattr_bucket(handle_t *handle,
                                u64 blkno, u64 new_blkno, u32 clusters,
+                               u32 *cpos, int num_buckets,
                                struct ocfs2_alloc_context *meta_ac,
                                struct ocfs2_alloc_context *data_ac,
                                struct ocfs2_reflink_xattr_tree_args *args)
 {
        int i, j, ret = 0;
        struct super_block *sb = args->reflink->old_inode->i_sb;
-       u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
-       u32 num_buckets = clusters * bpc;
        int bpb = args->old_bucket->bu_blocks;
        struct ocfs2_xattr_value_buf vb = {
                .vb_access = ocfs2_journal_access,
@@ -6816,14 +6831,6 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
                        break;
                }
 
-               /*
-                * The real bucket num in this series of blocks is stored
-                * in the 1st bucket.
-                */
-               if (i == 0)
-                       num_buckets = le16_to_cpu(
-                               bucket_xh(args->old_bucket)->xh_num_buckets);
-
                ret = ocfs2_xattr_bucket_journal_access(handle,
                                                args->new_bucket,
                                                OCFS2_JOURNAL_ACCESS_CREATE);
@@ -6837,6 +6844,18 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
                               bucket_block(args->old_bucket, j),
                               sb->s_blocksize);
 
+               /*
+                * Record the start cpos so that we can use it to initialize
+                * our xattr tree; we also set xh_num_buckets for the new
+                * bucket.
+                */
+               if (i == 0) {
+                       *cpos = le32_to_cpu(bucket_xh(args->new_bucket)->
+                                           xh_entries[0].xe_name_hash);
+                       bucket_xh(args->new_bucket)->xh_num_buckets =
+                               cpu_to_le16(num_buckets);
+               }
+
                ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
 
                ret = ocfs2_reflink_xattr_header(handle, args->reflink,
@@ -6866,6 +6885,7 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
                }
 
                ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
+
                ocfs2_xattr_bucket_relse(args->old_bucket);
                ocfs2_xattr_bucket_relse(args->new_bucket);
        }
@@ -6874,6 +6894,75 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
        ocfs2_xattr_bucket_relse(args->new_bucket);
        return ret;
 }
+
+static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+                               struct inode *inode,
+                               struct ocfs2_reflink_xattr_tree_args *args,
+                               struct ocfs2_extent_tree *et,
+                               struct ocfs2_alloc_context *meta_ac,
+                               struct ocfs2_alloc_context *data_ac,
+                               u64 blkno, u32 cpos, u32 len)
+{
+       int ret, first_inserted = 0;
+       u32 p_cluster, num_clusters, reflink_cpos = 0;
+       u64 new_blkno;
+       unsigned int num_buckets, reflink_buckets;
+       unsigned int bpc =
+               ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
+
+       ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+       num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets);
+       ocfs2_xattr_bucket_relse(args->old_bucket);
+
+       while (len && num_buckets) {
+               ret = ocfs2_claim_clusters(handle, data_ac,
+                                          1, &p_cluster, &num_clusters);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+
+               new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
+               reflink_buckets = min(num_buckets, bpc * num_clusters);
+
+               ret = ocfs2_reflink_xattr_bucket(handle, blkno,
+                                                new_blkno, num_clusters,
+                                                &reflink_cpos, reflink_buckets,
+                                                meta_ac, data_ac, args);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+
+               /*
+                * For the 1st allocated cluster, we make it use the same cpos
+                * so that the xattr tree looks the same as the original one
+                * in most cases.
+                */
+               if (!first_inserted) {
+                       reflink_cpos = cpos;
+                       first_inserted = 1;
+               }
+               ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno,
+                                         num_clusters, 0, meta_ac);
+               if (ret)
+                       mlog_errno(ret);
+
+               mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
+                    (unsigned long long)new_blkno, num_clusters, reflink_cpos);
+
+               len -= num_clusters;
+               blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
+               num_buckets -= reflink_buckets;
+       }
+out:
+       return ret;
+}
+
 /*
  * Create the same xattr extent record in the new inode's xattr tree.
  */
@@ -6885,8 +6974,6 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
                                   void *para)
 {
        int ret, credits = 0;
-       u32 p_cluster, num_clusters;
-       u64 new_blkno;
        handle_t *handle;
        struct ocfs2_reflink_xattr_tree_args *args =
                        (struct ocfs2_reflink_xattr_tree_args *)para;
@@ -6895,6 +6982,9 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_extent_tree et;
 
+       mlog(0, "reflink xattr buckets %llu len %u\n",
+            (unsigned long long)blkno, len);
+
        ocfs2_init_xattr_tree_extent_tree(&et,
                                          INODE_CACHE(args->reflink->new_inode),
                                          args->new_blk_bh);
@@ -6914,32 +7004,12 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
                goto out;
        }
 
-       ret = ocfs2_claim_clusters(handle, data_ac,
-                                  len, &p_cluster, &num_clusters);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
-       }
-
-       new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster);
-
-       mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
-            (unsigned long long)blkno, (unsigned long long)new_blkno, len);
-       ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len,
-                                         meta_ac, data_ac, args);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
-       }
-
-       mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
-            (unsigned long long)new_blkno, len, cpos);
-       ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno,
-                                 len, 0, meta_ac);
+       ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et,
+                                         meta_ac, data_ac,
+                                         blkno, cpos, len);
        if (ret)
                mlog_errno(ret);
 
-out_commit:
        ocfs2_commit_trans(osb, handle);
 
 out:
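
The chunked reflink loop above claims one cluster per iteration and copies at most bpc * num_clusters buckets from it. Since ocfs2 xattr buckets have a fixed 4KB size, buckets-per-cluster is just the cluster size divided by 4KB. A small worked sketch (the macro name is a stand-in for OCFS2_XATTR_BUCKET_SIZE):

    #define XATTR_BUCKET_SIZE 4096U  /* stand-in for OCFS2_XATTR_BUCKET_SIZE */

    static unsigned int buckets_per_cluster(unsigned int clustersize)
    {
            return clustersize / XATTR_BUCKET_SIZE;
    }
    /* e.g. 64KB clusters -> 16 buckets per cluster, so one claimed cluster
     * lets ocfs2_reflink_xattr_bucket() copy up to 16 buckets. */
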
index 3e73de5967ff94c9e5896aaf84cd0c6a1c7a4b96..fc8497643fd08f42d48b1bad50c4c30e5f43a782 100644 (file)
@@ -74,6 +74,7 @@ int ibm_partition(struct parsed_partitions *state)
        } *label;
        unsigned char *data;
        Sector sect;
+       sector_t labelsect;
 
        res = 0;
        blocksize = bdev_logical_block_size(bdev);
@@ -97,11 +98,20 @@ int ibm_partition(struct parsed_partitions *state)
            ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
                goto out_freeall;
 
+       /*
+        * Special case for FBA disks: label sector does not depend on
+        * blocksize.
+        */
+       if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
+           (info->cu_type == 0x3880 && info->dev_type == 0x3370))
+               labelsect = info->label_block;
+       else
+               labelsect = info->label_block * (blocksize >> 9);
+
        /*
         * Get volume label, extract name and type.
         */
-       data = read_part_sector(state, info->label_block*(blocksize/512),
-                               &sect);
+       data = read_part_sector(state, labelsect, &sect);
        if (data == NULL)
                goto out_readerr;
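
read_part_sector() addresses the device in 512-byte sectors, so for non-FBA disks the label block has to be scaled by the number of sectors per device block; FBA disks already store a sector number. A sketch of the conversion with worked values (is_fba stands in for the cu_type/dev_type check above):

    #include <stdint.h>

    static uint64_t label_sector(uint64_t label_block, unsigned int blocksize,
                                 int is_fba)
    {
            if (is_fba)             /* FBA: label_block is already a sector */
                    return label_block;
            return label_block * (blocksize >> 9);  /* 512-byte sectors/block */
    }
    /* blocksize 4096 -> factor 8, so label_block 2 lands at sector 16;
     * the old code applied the scaling to FBA disks as well, which is the
     * bug the hunk above fixes. */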
 
index 12c233da1b6b77e6c06d80b1bd18dbca93b65c78..437d2ca2de973d1027109b038e895e31a68b3a63 100644 (file)
@@ -676,7 +676,7 @@ static void prune_dqcache(int count)
  * This is called from kswapd when we think we need some
  * more memory
  */
-static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        if (nr) {
                spin_lock(&dq_list_lock);
index 02feb59cefcac356768d8efdf9e69c9c1ded811e..0b201114a5adf9cfc9c04b6730442748c079fa9d 100644 (file)
@@ -277,7 +277,7 @@ static int kick_a_thread(void)
        return 0;
 }
 
-int ubifs_shrinker(int nr, gfp_t gfp_mask)
+int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
        int freed, contention = 0;
        long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
index 2eef553d50c817e6276e994f4aee80971cdf2191..04310878f449ac44db40c89325dd0f745f2b5cdd 100644 (file)
@@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
 int ubifs_tnc_end_commit(struct ubifs_info *c);
 
 /* shrinker.c */
-int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask);
+int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
 
 /* commit.c */
 int ubifs_bg_thread(void *info);
index 649ade8ef598693e2e57ac66475c1993b57903e3..2ee3f7a60163e899e971700aba008a243049726a 100644 (file)
@@ -45,7 +45,7 @@
 
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(int, gfp_t);
+STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
 static struct shrinker xfs_buf_shake = {
        .shrink = xfsbufd_wakeup,
@@ -340,7 +340,7 @@ _xfs_buf_lookup_pages(
                                        __func__, gfp_mask);
 
                        XFS_STATS_INC(xb_page_retries);
-                       xfsbufd_wakeup(0, gfp_mask);
+                       xfsbufd_wakeup(NULL, 0, gfp_mask);
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry;
                }
@@ -1762,6 +1762,7 @@ xfs_buf_runall_queues(
 
 STATIC int
 xfsbufd_wakeup(
+       struct shrinker         *shrink,
        int                     priority,
        gfp_t                   mask)
 {
index f2d1718c9165f104befa17069c7b48cfd93a4b3a..80938c736c2769861327ea10368cd0c20d4ceb51 100644 (file)
@@ -1883,7 +1883,6 @@ init_xfs_fs(void)
                goto out_cleanup_procfs;
 
        vfs_initquota();
-       xfs_inode_shrinker_init();
 
        error = register_filesystem(&xfs_fs_type);
        if (error)
@@ -1911,7 +1910,6 @@ exit_xfs_fs(void)
 {
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
-       xfs_inode_shrinker_destroy();
        xfs_sysctl_unregister();
        xfs_cleanup_procfs();
        xfs_buf_terminate();
index ef7f0218bccb45779157128139ab552e8b423832..a51a07c3a70cfa8b5514add3dae137402bbc56b6 100644 (file)
@@ -144,6 +144,41 @@ restart:
        return last_error;
 }
 
+/*
+ * Select the next per-ag structure to iterate during the walk. The reclaim
+ * walk is optimised to walk only those AGs that contain reclaimable inodes.
+ */
+static struct xfs_perag *
+xfs_inode_ag_iter_next_pag(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          *first,
+       int                     tag)
+{
+       struct xfs_perag        *pag = NULL;
+
+       if (tag == XFS_ICI_RECLAIM_TAG) {
+               int found;
+               int ref;
+
+               spin_lock(&mp->m_perag_lock);
+               found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+                               (void **)&pag, *first, 1, tag);
+               if (found <= 0) {
+                       spin_unlock(&mp->m_perag_lock);
+                       return NULL;
+               }
+               *first = pag->pag_agno + 1;
+               /* open coded pag reference increment */
+               ref = atomic_inc_return(&pag->pag_ref);
+               spin_unlock(&mp->m_perag_lock);
+               trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
+       } else {
+               pag = xfs_perag_get(mp, *first);
+               (*first)++;
+       }
+       return pag;
+}
+
 int
 xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
@@ -154,16 +189,15 @@ xfs_inode_ag_iterator(
        int                     exclusive,
        int                     *nr_to_scan)
 {
+       struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     nr;
 
        nr = nr_to_scan ? *nr_to_scan : INT_MAX;
-       for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-               struct xfs_perag        *pag;
-
-               pag = xfs_perag_get(mp, ag);
+       ag = 0;
+       while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) {
                error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
                                                exclusive, &nr);
                xfs_perag_put(pag);
@@ -640,6 +674,17 @@ __xfs_inode_set_reclaim_tag(
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                           XFS_ICI_RECLAIM_TAG);
+
+       if (!pag->pag_ici_reclaimable) {
+               /* propagate the reclaim tag up into the perag radix tree */
+               spin_lock(&ip->i_mount->m_perag_lock);
+               radix_tree_tag_set(&ip->i_mount->m_perag_tree,
+                               XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+                               XFS_ICI_RECLAIM_TAG);
+               spin_unlock(&ip->i_mount->m_perag_lock);
+               trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
+                                                       -1, _RET_IP_);
+       }
        pag->pag_ici_reclaimable++;
 }
 
@@ -674,6 +719,16 @@ __xfs_inode_clear_reclaim_tag(
        radix_tree_tag_clear(&pag->pag_ici_root,
                        XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
        pag->pag_ici_reclaimable--;
+       if (!pag->pag_ici_reclaimable) {
+               /* clear the reclaim tag from the perag radix tree */
+               spin_lock(&ip->i_mount->m_perag_lock);
+               radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
+                               XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+                               XFS_ICI_RECLAIM_TAG);
+               spin_unlock(&ip->i_mount->m_perag_lock);
+               trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
+                                                       -1, _RET_IP_);
+       }
 }
 
 /*
@@ -828,83 +883,52 @@ xfs_reclaim_inodes(
 
 /*
  * Shrinker infrastructure.
- *
- * This is all far more complex than it needs to be. It adds a global list of
- * mounts because the shrinkers can only call a global context. We need to make
- * the shrinkers pass a context to avoid the need for global state.
  */
-static LIST_HEAD(xfs_mount_list);
-static struct rw_semaphore xfs_mount_list_lock;
-
 static int
 xfs_reclaim_inode_shrink(
+       struct shrinker *shrink,
        int             nr_to_scan,
        gfp_t           gfp_mask)
 {
        struct xfs_mount *mp;
        struct xfs_perag *pag;
        xfs_agnumber_t  ag;
-       int             reclaimable = 0;
+       int             reclaimable;
 
+       mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
        if (nr_to_scan) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
 
-               down_read(&xfs_mount_list_lock);
-               list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-                       xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
+               xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
                                        XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
-                       if (nr_to_scan <= 0)
-                               break;
-               }
-               up_read(&xfs_mount_list_lock);
-       }
+               /* if we don't exhaust the scan, don't bother coming back */
+               if (nr_to_scan > 0)
+                       return -1;
+       }
 
-       down_read(&xfs_mount_list_lock);
-       list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-               for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-                       pag = xfs_perag_get(mp, ag);
-                       reclaimable += pag->pag_ici_reclaimable;
-                       xfs_perag_put(pag);
-               }
+       reclaimable = 0;
+       ag = 0;
+       while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag,
+                                       XFS_ICI_RECLAIM_TAG))) {
+               reclaimable += pag->pag_ici_reclaimable;
+               xfs_perag_put(pag);
        }
-       up_read(&xfs_mount_list_lock);
        return reclaimable;
 }
 
-static struct shrinker xfs_inode_shrinker = {
-       .shrink = xfs_reclaim_inode_shrink,
-       .seeks = DEFAULT_SEEKS,
-};
-
-void __init
-xfs_inode_shrinker_init(void)
-{
-       init_rwsem(&xfs_mount_list_lock);
-       register_shrinker(&xfs_inode_shrinker);
-}
-
-void
-xfs_inode_shrinker_destroy(void)
-{
-       ASSERT(list_empty(&xfs_mount_list));
-       unregister_shrinker(&xfs_inode_shrinker);
-}
-
 void
 xfs_inode_shrinker_register(
        struct xfs_mount        *mp)
 {
-       down_write(&xfs_mount_list_lock);
-       list_add_tail(&mp->m_mplist, &xfs_mount_list);
-       up_write(&xfs_mount_list_lock);
+       mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
+       mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
+       register_shrinker(&mp->m_inode_shrink);
 }
 
 void
 xfs_inode_shrinker_unregister(
        struct xfs_mount        *mp)
 {
-       down_write(&xfs_mount_list_lock);
-       list_del(&mp->m_mplist);
-       up_write(&xfs_mount_list_lock);
+       unregister_shrinker(&mp->m_inode_shrink);
 }
index cdcbaaca9880d04dfa8f2146580b0ef216ed178f..e28139aaa4aa42c6085100856345e0156a33eef3 100644 (file)
@@ -55,8 +55,6 @@ int xfs_inode_ag_iterator(struct xfs_mount *mp,
        int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
        int flags, int tag, int write_lock, int *nr_to_scan);
 
-void xfs_inode_shrinker_init(void);
-void xfs_inode_shrinker_destroy(void);
 void xfs_inode_shrinker_register(struct xfs_mount *mp);
 void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
 
index 73d5aa117384bc1e8c626012dba881e02a5deb57..302820690904d1eff4de201ca97ca97a5fad4c9d 100644 (file)
@@ -124,7 +124,10 @@ DEFINE_EVENT(xfs_perag_class, name,        \
                 unsigned long caller_ip),                                      \
        TP_ARGS(mp, agno, refcount, caller_ip))
 DEFINE_PERAG_REF_EVENT(xfs_perag_get);
+DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim);
 DEFINE_PERAG_REF_EVENT(xfs_perag_put);
+DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
+DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
 
 TRACE_EVENT(xfs_attr_list_node_descend,
        TP_PROTO(struct xfs_attr_list_context *ctx,
index 8c117ff2e3ab00d8939f7ec596063988faae283f..67c018392d62a8ecef3c93f35c2b942b0b52137d 100644 (file)
@@ -69,7 +69,7 @@ STATIC void   xfs_qm_list_destroy(xfs_dqlist_t *);
 
 STATIC int     xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int     xfs_qm_init_quotainfo(xfs_mount_t *);
-STATIC int     xfs_qm_shake(int, gfp_t);
+STATIC int     xfs_qm_shake(struct shrinker *, int, gfp_t);
 
 static struct shrinker xfs_qm_shaker = {
        .shrink = xfs_qm_shake,
@@ -2117,7 +2117,10 @@ xfs_qm_shake_freelist(
  */
 /* ARGSUSED */
 STATIC int
-xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
+xfs_qm_shake(
+       struct shrinker *shrink,
+       int             nr_to_scan,
+       gfp_t           gfp_mask)
 {
        int     ndqused, nfree, n;
 
index 1d2c7eed4eda6e3fb545f6b3dd4e0fb30515ea5f..5761087ee8ea31be3e4c571630ceca807485e5e3 100644 (file)
@@ -259,7 +259,7 @@ typedef struct xfs_mount {
        wait_queue_head_t       m_wait_single_sync_task;
        __int64_t               m_update_flags; /* sb flags we need to update
                                                   on the next remount,rw */
-       struct list_head        m_mplist;       /* inode shrinker mount list */
+       struct shrinker         m_inode_shrink; /* inode reclaim shrinker */
 } xfs_mount_t;
 
 /*
index 013dc529e95ffc43121fbf8cfbac9d432fa29bc6..d147461bc2712b9c9fb7aae84d65e600d6c60fd4 100644 (file)
@@ -61,7 +61,8 @@ struct files_struct {
        (rcu_dereference_check((fdtfd), \
                               rcu_read_lock_held() || \
                               lockdep_is_held(&(files)->file_lock) || \
-                              atomic_read(&(files)->count) == 1))
+                              atomic_read(&(files)->count) == 1 || \
+                              rcu_my_thread_group_empty()))
 
 #define files_fdtable(files) \
                (rcu_dereference_check_fdtable((files), (files)->fdt))
index 9bf6870ee5f4cff00d8bd3dfea9ee374ebfb2e2b..a986ff588944ae0a5bb1ccc7017150324b488210 100644 (file)
@@ -46,31 +46,31 @@ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
 
 #else
 
-void i8042_lock_chip(void)
+static inline void i8042_lock_chip(void)
 {
 }
 
-void i8042_unlock_chip(void)
+static inline void i8042_unlock_chip(void)
 {
 }
 
-int i8042_command(unsigned char *param, int command)
+static inline int i8042_command(unsigned char *param, int command)
 {
        return -ENODEV;
 }
 
-bool i8042_check_port_owner(const struct serio *serio)
+static inline bool i8042_check_port_owner(const struct serio *serio)
 {
        return false;
 }
 
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
                                        struct serio *serio))
 {
        return -ENODEV;
 }
 
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
                                       struct serio *serio))
 {
        return -ENODEV;
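
Making the !CONFIG_SERIO_I8042 stubs static inline keeps them header-local: as plain external functions, every translation unit including the header would emit its own definition and the final link would fail with multiple-definition errors. The general pattern, sketched for a hypothetical config option:

    /* foo.h -- hypothetical config-gated API */
    #ifdef CONFIG_FOO
    int foo_command(unsigned char *param, int command);    /* real driver */
    #else
    static inline int foo_command(unsigned char *param, int command)
    {
            return -ENODEV;         /* hardware support compiled out */
    }
    #endif
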
index a4d2e9f7088ada70d8b1357f418c32cd09a5bf1a..adf832dec3f37dd639e8aa24fe3cc29c7504a6a7 100644 (file)
@@ -1026,11 +1026,12 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
 
 struct jbd2_buffer_trigger_type {
        /*
-        * Fired just before a buffer is written to the journal.
-        * mapped_data is a mapped buffer that is the frozen data for
-        * commit.
+        * Fired at the moment the data to be written to the journal is
+        * known to be stable - so either when b_frozen_data is created or
+        * just before a buffer is written to the journal.  mapped_data is
+        * a mapped buffer that is the frozen data for commit.
         */
-       void (*t_commit)(struct jbd2_buffer_trigger_type *type,
+       void (*t_frozen)(struct jbd2_buffer_trigger_type *type,
                         struct buffer_head *bh, void *mapped_data,
                         size_t size);
 
@@ -1042,7 +1043,7 @@ struct jbd2_buffer_trigger_type {
                        struct buffer_head *bh);
 };
 
-extern void jbd2_buffer_commit_trigger(struct journal_head *jh,
+extern void jbd2_buffer_frozen_trigger(struct journal_head *jh,
                                       void *mapped_data,
                                       struct jbd2_buffer_trigger_type *triggers);
 extern void jbd2_buffer_abort_trigger(struct journal_head *jh,
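
With the rename, a trigger fires as soon as the journaled data is stable rather than strictly at commit time, which is what lets ocfs2 recompute block checksums at the right moment. A minimal sketch of a t_frozen user, attached with jbd2_journal_set_triggers(); the checksum helper is hypothetical:

    static void my_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                  struct buffer_head *bh,
                                  void *mapped_data, size_t size)
    {
            /* mapped_data is the stable copy headed for the journal;
             * recompute any in-block checksum here. */
            my_recompute_checksum(mapped_data, size);       /* hypothetical */
    }

    static struct jbd2_buffer_trigger_type my_triggers = {
            .t_frozen = my_frozen_trigger,
    };

    /* After getting journal access to bh:
     *         jbd2_journal_set_triggers(bh, &my_triggers);
     */
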
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
deleted file mode 100644 (file)
index f3d1433..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef _LINUX_LMB_H
-#define _LINUX_LMB_H
-#ifdef __KERNEL__
-
-/*
- * Logical memory blocks.
- *
- * Copyright (C) 2001 Peter Bergner, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-
-#define MAX_LMB_REGIONS 128
-
-struct lmb_property {
-       u64 base;
-       u64 size;
-};
-
-struct lmb_region {
-       unsigned long cnt;
-       u64 size;
-       struct lmb_property region[MAX_LMB_REGIONS+1];
-};
-
-struct lmb {
-       unsigned long debug;
-       u64 rmo_size;
-       struct lmb_region memory;
-       struct lmb_region reserved;
-};
-
-extern struct lmb lmb;
-
-extern void __init lmb_init(void);
-extern void __init lmb_analyze(void);
-extern long lmb_add(u64 base, u64 size);
-extern long lmb_remove(u64 base, u64 size);
-extern long __init lmb_free(u64 base, u64 size);
-extern long __init lmb_reserve(u64 base, u64 size);
-extern u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
-                               u64 (*nid_range)(u64, u64, int *));
-extern u64 __init lmb_alloc(u64 size, u64 align);
-extern u64 __init lmb_alloc_base(u64 size,
-               u64, u64 max_addr);
-extern u64 __init __lmb_alloc_base(u64 size,
-               u64 align, u64 max_addr);
-extern u64 __init lmb_phys_mem_size(void);
-extern u64 lmb_end_of_DRAM(void);
-extern void __init lmb_enforce_memory_limit(u64 memory_limit);
-extern int __init lmb_is_reserved(u64 addr);
-extern int lmb_is_region_reserved(u64 base, u64 size);
-extern int lmb_find(struct lmb_property *res);
-
-extern void lmb_dump_all(void);
-
-static inline u64
-lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
-{
-       return type->region[region_nr].size;
-}
-static inline u64
-lmb_size_pages(struct lmb_region *type, unsigned long region_nr)
-{
-       return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT;
-}
-static inline u64
-lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
-{
-       return type->region[region_nr].base >> PAGE_SHIFT;
-}
-static inline u64
-lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
-{
-       return lmb_start_pfn(type, region_nr) +
-              lmb_size_pages(type, region_nr);
-}
-
-#include <asm/lmb.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_LMB_H */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
new file mode 100644 (file)
index 0000000..a59faf2
--- /dev/null
@@ -0,0 +1,89 @@
+#ifndef _LINUX_MEMBLOCK_H
+#define _LINUX_MEMBLOCK_H
+#ifdef __KERNEL__
+
+/*
+ * Logical memory blocks.
+ *
+ * Copyright (C) 2001 Peter Bergner, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#define MAX_MEMBLOCK_REGIONS 128
+
+struct memblock_property {
+       u64 base;
+       u64 size;
+};
+
+struct memblock_region {
+       unsigned long cnt;
+       u64 size;
+       struct memblock_property region[MAX_MEMBLOCK_REGIONS+1];
+};
+
+struct memblock {
+       unsigned long debug;
+       u64 rmo_size;
+       struct memblock_region memory;
+       struct memblock_region reserved;
+};
+
+extern struct memblock memblock;
+
+extern void __init memblock_init(void);
+extern void __init memblock_analyze(void);
+extern long memblock_add(u64 base, u64 size);
+extern long memblock_remove(u64 base, u64 size);
+extern long __init memblock_free(u64 base, u64 size);
+extern long __init memblock_reserve(u64 base, u64 size);
+extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
+                               u64 (*nid_range)(u64, u64, int *));
+extern u64 __init memblock_alloc(u64 size, u64 align);
+extern u64 __init memblock_alloc_base(u64 size,
+               u64, u64 max_addr);
+extern u64 __init __memblock_alloc_base(u64 size,
+               u64 align, u64 max_addr);
+extern u64 __init memblock_phys_mem_size(void);
+extern u64 memblock_end_of_DRAM(void);
+extern void __init memblock_enforce_memory_limit(u64 memory_limit);
+extern int __init memblock_is_reserved(u64 addr);
+extern int memblock_is_region_reserved(u64 base, u64 size);
+extern int memblock_find(struct memblock_property *res);
+
+extern void memblock_dump_all(void);
+
+static inline u64
+memblock_size_bytes(struct memblock_region *type, unsigned long region_nr)
+{
+       return type->region[region_nr].size;
+}
+static inline u64
+memblock_size_pages(struct memblock_region *type, unsigned long region_nr)
+{
+       return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT;
+}
+static inline u64
+memblock_start_pfn(struct memblock_region *type, unsigned long region_nr)
+{
+       return type->region[region_nr].base >> PAGE_SHIFT;
+}
+static inline u64
+memblock_end_pfn(struct memblock_region *type, unsigned long region_nr)
+{
+       return memblock_start_pfn(type, region_nr) +
+              memblock_size_pages(type, region_nr);
+}
+
+#include <asm/memblock.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_MEMBLOCK_H */
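
The API is a pure rename of the old lmb interface; typical early-boot usage on the converted architectures looks roughly like the sketch below (addresses and sizes are placeholders):

    /* Early boot, before the page allocator is up. */
    static void __init early_memory_setup(u64 kernel_start, u64 kernel_size)
    {
            u64 paddr;

            memblock_init();
            memblock_add(0, 0x40000000);                 /* register 1GB of RAM */
            memblock_analyze();                          /* recompute totals */
            memblock_reserve(kernel_start, kernel_size); /* protect the image */
            paddr = memblock_alloc(1 << 20, PAGE_SIZE);  /* early 1MB allocation */
            (void)paddr;
    }
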
index b969efb03787ee69546995ad378ac54b6bc37e3b..a2b48041b91084865a71c74b3f13342bf02fc6be 100644 (file)
@@ -999,7 +999,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
  * querying the cache size, so a fastpath for that case is appropriate.
  */
 struct shrinker {
-       int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+       int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
        int seeks;      /* seeks to recreate an obj */
 
        /* These are for internal use */
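
Passing the struct shrinker pointer to the callback is what allows per-instance shrinkers: embed the struct in the object being shrunk and recover the container with container_of(), as the XFS conversion earlier in this series does with m_inode_shrink. A generic sketch of the pattern (my_cache is illustrative):

    struct my_cache {
            struct shrinker shrink;         /* embedded, registered below */
            atomic_t        nr_objects;
    };

    static int my_cache_shrink(struct shrinker *shrink, int nr_to_scan,
                               gfp_t gfp_mask)
    {
            struct my_cache *cache = container_of(shrink, struct my_cache,
                                                  shrink);

            if (nr_to_scan) {
                    /* ...free up to nr_to_scan objects from 'cache'... */
            }
            return atomic_read(&cache->nr_objects);     /* remaining count */
    }

    static void my_cache_register(struct my_cache *cache)
    {
            cache->shrink.shrink = my_cache_shrink;
            cache->shrink.seeks = DEFAULT_SEEKS;
            register_shrinker(&cache->shrink);
    }
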
index 7cb00845f150ca185b73a47931c6a1fbcd4c1cef..f26fda76b87fc66816f2fa286e97d760c545f165 100644 (file)
@@ -288,6 +288,7 @@ struct pci_dev {
         */
        unsigned int    irq;
        struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+       resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */
 
        /* These fields are used by common fixups */
        unsigned int    transparent:1;  /* Transparent PCI bridge */
index 7f614ce274a9198e969d573834d8782f95b8e6a3..13ebb5413a7982c5fdd1153432794f39a708f362 100644 (file)
@@ -124,7 +124,8 @@ extern struct trace_event_functions enter_syscall_print_funcs;
 extern struct trace_event_functions exit_syscall_print_funcs;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname)                               \
-       static struct syscall_metadata __syscall_meta_##sname;          \
+       static struct syscall_metadata                                  \
+       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
        static struct ftrace_event_call                                 \
        __attribute__((__aligned__(4))) event_enter_##sname;            \
        static struct ftrace_event_call __used                          \
@@ -138,7 +139,8 @@ extern struct trace_event_functions exit_syscall_print_funcs;
        }
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)                                        \
-       static struct syscall_metadata __syscall_meta_##sname;          \
+       static struct syscall_metadata                                  \
+       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
        static struct ftrace_event_call                                 \
        __attribute__((__aligned__(4))) event_exit_##sname;             \
        static struct ftrace_event_call __used                          \
index 731150d52799ecc5492b3590c3b7b5b34de09072..0a691ea7654aefb403e25039f08641691d3de74c 100644 (file)
@@ -1224,12 +1224,7 @@ static inline void sk_tx_queue_clear(struct sock *sk)
 
 static inline int sk_tx_queue_get(const struct sock *sk)
 {
-       return sk->sk_tx_queue_mapping;
-}
-
-static inline bool sk_tx_queue_recorded(const struct sock *sk)
-{
-       return (sk && sk->sk_tx_queue_mapping >= 0);
+       return sk ? sk->sk_tx_queue_mapping : -1;
 }
 
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
index 506c8491a8d131761432aa306e51eef441863887..40a8f462a8224b298690cb07892f93afe8c15214 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1256,6 +1256,33 @@ out:
        return un;
 }
 
+
+/**
+ * get_queue_result - Retrieve the result code from sem_queue
+ * @q: Pointer to queue structure
+ *
+ * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
+ * q->status, then we must loop until the value is replaced with the final
+ * value: This may happen if a task is woken up by an unrelated event (e.g.
+ * signal) and in parallel the task is woken up by another task because it got
+ * the requested semaphores.
+ *
+ * The function can be called with or without holding the semaphore spinlock.
+ */
+static int get_queue_result(struct sem_queue *q)
+{
+       int error;
+
+       error = q->status;
+       while (unlikely(error == IN_WAKEUP)) {
+               cpu_relax();
+               error = q->status;
+       }
+
+       return error;
+}
+
+
 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
                unsigned, nsops, const struct timespec __user *, timeout)
 {
@@ -1409,15 +1436,18 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        else
                schedule();
 
-       error = queue.status;
-       while(unlikely(error == IN_WAKEUP)) {
-               cpu_relax();
-               error = queue.status;
-       }
+       error = get_queue_result(&queue);
 
        if (error != -EINTR) {
                /* fast path: update_queue already obtained all requested
-                * resources */
+                * resources.
+                * Perform an smp_mb(): user space may assume that semop()
+                * is a memory barrier; without it, the CPU could
+                * speculatively read stale user-space data that was
+                * overwritten by the previous owner of the semaphore.
+                */
+               smp_mb();
+
                goto out_free;
        }
 
@@ -1427,10 +1457,12 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
                goto out_free;
        }
 
+       error = get_queue_result(&queue);
+
        /*
         * If queue.status != -EINTR we are woken up by another process
         */
-       error = queue.status;
+
        if (error != -EINTR) {
                goto out_unlock_free;
        }
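
get_queue_result() exists because the waker publishes the result in two steps: it stores the transient IN_WAKEUP marker, wakes the sleeper, and only then stores the final error code. A sleeper woken early by an unrelated event (a signal, say) can observe the marker and must spin until the final value lands. Roughly, the handshake looks like this (a sketch of the protocol, not the exact kernel code):

    /* Waker side (simplified): */
    q->status = IN_WAKEUP;          /* transient marker */
    wake_up_process(q->sleeper);    /* sleeper may run immediately */
    smp_wmb();
    q->status = error;              /* final, stable result */

    /* Sleeper side -- what get_queue_result() does: */
    error = q->status;
    while (unlikely(error == IN_WAKEUP)) {
            cpu_relax();
            error = q->status;
    }
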
index 31aa9332ef3f8d4f5447f72a3989cbfd0dc7c6df..7bfae887f211556961515b5d057cdcf2880a71c9 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/bootmem.h>
 #include <linux/mm.h>
 #include <linux/early_res.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 /*
  * Early reserved memory areas.
@@ -319,6 +321,8 @@ void __init free_early(u64 start, u64 end)
        struct early_res *r;
        int i;
 
+       kmemleak_free_part(__va(start), end - start);
+
        i = find_overlapped_early(start, end);
        r = &early_res[i];
        if (i >= max_early_res || r->end != end || r->start != start)
@@ -333,6 +337,8 @@ void __init free_early_partial(u64 start, u64 end)
        struct early_res *r;
        int i;
 
+       kmemleak_free_part(__va(start), end - start);
+
        if (start == end)
                return;
 
index 170d8ca901d8c61faf09a1aff5846c65b96eced1..5b916bc0fbaeeae5a093d9ffbf764d69db746906 100644 (file)
@@ -181,9 +181,6 @@ config HAS_DMA
 config CHECK_SIGNATURE
        bool
 
-config HAVE_LMB
-       boolean
-
 config CPUMASK_OFFSTACK
        bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
        help
index 3f1062cbbff47baa14d46b206fe862d5972b89c6..0bfabba1bb3268cb8988e5123f221343b771e68a 100644 (file)
@@ -89,8 +89,6 @@ obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
-obj-$(CONFIG_HAVE_LMB) += lmb.o
-
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
 obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
diff --git a/lib/lmb.c b/lib/lmb.c
deleted file mode 100644 (file)
index b1fc526..0000000
--- a/lib/lmb.c
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Procedures for maintaining information about logical memory blocks.
- *
- * Peter Bergner, IBM Corp.    June 2001.
- * Copyright (C) 2001 Peter Bergner.
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/lmb.h>
-
-#define LMB_ALLOC_ANYWHERE     0
-
-struct lmb lmb;
-
-static int lmb_debug;
-
-static int __init early_lmb(char *p)
-{
-       if (p && strstr(p, "debug"))
-               lmb_debug = 1;
-       return 0;
-}
-early_param("lmb", early_lmb);
-
-static void lmb_dump(struct lmb_region *region, char *name)
-{
-       unsigned long long base, size;
-       int i;
-
-       pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);
-
-       for (i = 0; i < region->cnt; i++) {
-               base = region->region[i].base;
-               size = region->region[i].size;
-
-               pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
-                   name, i, base, base + size - 1, size);
-       }
-}
-
-void lmb_dump_all(void)
-{
-       if (!lmb_debug)
-               return;
-
-       pr_info("LMB configuration:\n");
-       pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)lmb.rmo_size);
-       pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
-
-       lmb_dump(&lmb.memory, "memory");
-       lmb_dump(&lmb.reserved, "reserved");
-}
-
-static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
-                                       u64 size2)
-{
-       return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
-}
-
-static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
-{
-       if (base2 == base1 + size1)
-               return 1;
-       else if (base1 == base2 + size2)
-               return -1;
-
-       return 0;
-}
-
-static long lmb_regions_adjacent(struct lmb_region *rgn,
-               unsigned long r1, unsigned long r2)
-{
-       u64 base1 = rgn->region[r1].base;
-       u64 size1 = rgn->region[r1].size;
-       u64 base2 = rgn->region[r2].base;
-       u64 size2 = rgn->region[r2].size;
-
-       return lmb_addrs_adjacent(base1, size1, base2, size2);
-}
-
-static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
-{
-       unsigned long i;
-
-       for (i = r; i < rgn->cnt - 1; i++) {
-               rgn->region[i].base = rgn->region[i + 1].base;
-               rgn->region[i].size = rgn->region[i + 1].size;
-       }
-       rgn->cnt--;
-}
-
-/* Assumption: base addr of region 1 < base addr of region 2 */
-static void lmb_coalesce_regions(struct lmb_region *rgn,
-               unsigned long r1, unsigned long r2)
-{
-       rgn->region[r1].size += rgn->region[r2].size;
-       lmb_remove_region(rgn, r2);
-}
-
-void __init lmb_init(void)
-{
-       /* Create a dummy zero size LMB which will get coalesced away later.
-        * This simplifies the lmb_add() code below...
-        */
-       lmb.memory.region[0].base = 0;
-       lmb.memory.region[0].size = 0;
-       lmb.memory.cnt = 1;
-
-       /* Ditto. */
-       lmb.reserved.region[0].base = 0;
-       lmb.reserved.region[0].size = 0;
-       lmb.reserved.cnt = 1;
-}
-
-void __init lmb_analyze(void)
-{
-       int i;
-
-       lmb.memory.size = 0;
-
-       for (i = 0; i < lmb.memory.cnt; i++)
-               lmb.memory.size += lmb.memory.region[i].size;
-}
-
-static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
-{
-       unsigned long coalesced = 0;
-       long adjacent, i;
-
-       if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
-               rgn->region[0].base = base;
-               rgn->region[0].size = size;
-               return 0;
-       }
-
-       /* First try and coalesce this LMB with another. */
-       for (i = 0; i < rgn->cnt; i++) {
-               u64 rgnbase = rgn->region[i].base;
-               u64 rgnsize = rgn->region[i].size;
-
-               if ((rgnbase == base) && (rgnsize == size))
-                       /* Already have this region, so we're done */
-                       return 0;
-
-               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
-               if (adjacent > 0) {
-                       rgn->region[i].base -= size;
-                       rgn->region[i].size += size;
-                       coalesced++;
-                       break;
-               } else if (adjacent < 0) {
-                       rgn->region[i].size += size;
-                       coalesced++;
-                       break;
-               }
-       }
-
-       if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
-               lmb_coalesce_regions(rgn, i, i+1);
-               coalesced++;
-       }
-
-       if (coalesced)
-               return coalesced;
-       if (rgn->cnt >= MAX_LMB_REGIONS)
-               return -1;
-
-       /* Couldn't coalesce the LMB, so add it to the sorted table. */
-       for (i = rgn->cnt - 1; i >= 0; i--) {
-               if (base < rgn->region[i].base) {
-                       rgn->region[i+1].base = rgn->region[i].base;
-                       rgn->region[i+1].size = rgn->region[i].size;
-               } else {
-                       rgn->region[i+1].base = base;
-                       rgn->region[i+1].size = size;
-                       break;
-               }
-       }
-
-       if (base < rgn->region[0].base) {
-               rgn->region[0].base = base;
-               rgn->region[0].size = size;
-       }
-       rgn->cnt++;
-
-       return 0;
-}
-
-long lmb_add(u64 base, u64 size)
-{
-       struct lmb_region *_rgn = &lmb.memory;
-
-       /* On pSeries LPAR systems, the first LMB is our RMO region. */
-       if (base == 0)
-               lmb.rmo_size = size;
-
-       return lmb_add_region(_rgn, base, size);
-
-}
-
-static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
-{
-       u64 rgnbegin, rgnend;
-       u64 end = base + size;
-       int i;
-
-       rgnbegin = rgnend = 0; /* supress gcc warnings */
-
-       /* Find the region where (base, size) belongs to */
-       for (i=0; i < rgn->cnt; i++) {
-               rgnbegin = rgn->region[i].base;
-               rgnend = rgnbegin + rgn->region[i].size;
-
-               if ((rgnbegin <= base) && (end <= rgnend))
-                       break;
-       }
-
-       /* Didn't find the region */
-       if (i == rgn->cnt)
-               return -1;
-
-       /* Check to see if we are removing entire region */
-       if ((rgnbegin == base) && (rgnend == end)) {
-               lmb_remove_region(rgn, i);
-               return 0;
-       }
-
-       /* Check to see if region is matching at the front */
-       if (rgnbegin == base) {
-               rgn->region[i].base = end;
-               rgn->region[i].size -= size;
-               return 0;
-       }
-
-       /* Check to see if the region is matching at the end */
-       if (rgnend == end) {
-               rgn->region[i].size -= size;
-               return 0;
-       }
-
-       /*
-        * We need to split the entry -  adjust the current one to the
-        * beginging of the hole and add the region after hole.
-        */
-       rgn->region[i].size = base - rgn->region[i].base;
-       return lmb_add_region(rgn, end, rgnend - end);
-}
-
-long lmb_remove(u64 base, u64 size)
-{
-       return __lmb_remove(&lmb.memory, base, size);
-}
-
-long __init lmb_free(u64 base, u64 size)
-{
-       return __lmb_remove(&lmb.reserved, base, size);
-}
-
-long __init lmb_reserve(u64 base, u64 size)
-{
-       struct lmb_region *_rgn = &lmb.reserved;
-
-       BUG_ON(0 == size);
-
-       return lmb_add_region(_rgn, base, size);
-}
-
-long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
-{
-       unsigned long i;
-
-       for (i = 0; i < rgn->cnt; i++) {
-               u64 rgnbase = rgn->region[i].base;
-               u64 rgnsize = rgn->region[i].size;
-               if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
-                       break;
-       }
-
-       return (i < rgn->cnt) ? i : -1;
-}
-
-static u64 lmb_align_down(u64 addr, u64 size)
-{
-       return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
-       return (addr + (size - 1)) & ~(size - 1);
-}
-
-static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
-                                          u64 size, u64 align)
-{
-       u64 base, res_base;
-       long j;
-
-       base = lmb_align_down((end - size), align);
-       while (start <= base) {
-               j = lmb_overlaps_region(&lmb.reserved, base, size);
-               if (j < 0) {
-                       /* this area isn't reserved, take it */
-                       if (lmb_add_region(&lmb.reserved, base, size) < 0)
-                               base = ~(u64)0;
-                       return base;
-               }
-               res_base = lmb.reserved.region[j].base;
-               if (res_base < size)
-                       break;
-               base = lmb_align_down(res_base - size, align);
-       }
-
-       return ~(u64)0;
-}
-
-static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
-                                      u64 (*nid_range)(u64, u64, int *),
-                                      u64 size, u64 align, int nid)
-{
-       u64 start, end;
-
-       start = mp->base;
-       end = start + mp->size;
-
-       start = lmb_align_up(start, align);
-       while (start < end) {
-               u64 this_end;
-               int this_nid;
-
-               this_end = nid_range(start, end, &this_nid);
-               if (this_nid == nid) {
-                       u64 ret = lmb_alloc_nid_unreserved(start, this_end,
-                                                          size, align);
-                       if (ret != ~(u64)0)
-                               return ret;
-               }
-               start = this_end;
-       }
-
-       return ~(u64)0;
-}
-
-u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
-                        u64 (*nid_range)(u64 start, u64 end, int *nid))
-{
-       struct lmb_region *mem = &lmb.memory;
-       int i;
-
-       BUG_ON(0 == size);
-
-       size = lmb_align_up(size, align);
-
-       for (i = 0; i < mem->cnt; i++) {
-               u64 ret = lmb_alloc_nid_region(&mem->region[i],
-                                              nid_range,
-                                              size, align, nid);
-               if (ret != ~(u64)0)
-                       return ret;
-       }
-
-       return lmb_alloc(size, align);
-}
-
-u64 __init lmb_alloc(u64 size, u64 align)
-{
-       return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
-}
-
-u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
-       u64 alloc;
-
-       alloc = __lmb_alloc_base(size, align, max_addr);
-
-       if (alloc == 0)
-               panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-                     (unsigned long long) size, (unsigned long long) max_addr);
-
-       return alloc;
-}
-
-u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
-       long i, j;
-       u64 base = 0;
-       u64 res_base;
-
-       BUG_ON(0 == size);
-
-       size = lmb_align_up(size, align);
-
-       /* On some platforms, make sure we allocate lowmem */
-       /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
-       if (max_addr == LMB_ALLOC_ANYWHERE)
-               max_addr = LMB_REAL_LIMIT;
-
-       for (i = lmb.memory.cnt - 1; i >= 0; i--) {
-               u64 lmbbase = lmb.memory.region[i].base;
-               u64 lmbsize = lmb.memory.region[i].size;
-
-               if (lmbsize < size)
-                       continue;
-               if (max_addr == LMB_ALLOC_ANYWHERE)
-                       base = lmb_align_down(lmbbase + lmbsize - size, align);
-               else if (lmbbase < max_addr) {
-                       base = min(lmbbase + lmbsize, max_addr);
-                       base = lmb_align_down(base - size, align);
-               } else
-                       continue;
-
-               while (base && lmbbase <= base) {
-                       j = lmb_overlaps_region(&lmb.reserved, base, size);
-                       if (j < 0) {
-                               /* this area isn't reserved, take it */
-                               if (lmb_add_region(&lmb.reserved, base, size) < 0)
-                                       return 0;
-                               return base;
-                       }
-                       res_base = lmb.reserved.region[j].base;
-                       if (res_base < size)
-                               break;
-                       base = lmb_align_down(res_base - size, align);
-               }
-       }
-       return 0;
-}
-
-/* You must call lmb_analyze() before this. */
-u64 __init lmb_phys_mem_size(void)
-{
-       return lmb.memory.size;
-}
-
-u64 lmb_end_of_DRAM(void)
-{
-       int idx = lmb.memory.cnt - 1;
-
-       return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
-}
-
-/* You must call lmb_analyze() after this. */
-void __init lmb_enforce_memory_limit(u64 memory_limit)
-{
-       unsigned long i;
-       u64 limit;
-       struct lmb_property *p;
-
-       if (!memory_limit)
-               return;
-
-       /* Truncate the lmb regions to satisfy the memory limit. */
-       limit = memory_limit;
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               if (limit > lmb.memory.region[i].size) {
-                       limit -= lmb.memory.region[i].size;
-                       continue;
-               }
-
-               lmb.memory.region[i].size = limit;
-               lmb.memory.cnt = i + 1;
-               break;
-       }
-
-       if (lmb.memory.region[0].size < lmb.rmo_size)
-               lmb.rmo_size = lmb.memory.region[0].size;
-
-       memory_limit = lmb_end_of_DRAM();
-
-       /* And truncate any reserves above the limit also. */
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               p = &lmb.reserved.region[i];
-
-               if (p->base > memory_limit)
-                       p->size = 0;
-               else if ((p->base + p->size) > memory_limit)
-                       p->size = memory_limit - p->base;
-
-               if (p->size == 0) {
-                       lmb_remove_region(&lmb.reserved, i);
-                       i--;
-               }
-       }
-}
-
-int __init lmb_is_reserved(u64 addr)
-{
-       int i;
-
-       for (i = 0; i < lmb.reserved.cnt; i++) {
-               u64 upper = lmb.reserved.region[i].base +
-                       lmb.reserved.region[i].size - 1;
-               if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
-                       return 1;
-       }
-       return 0;
-}
-
-int lmb_is_region_reserved(u64 base, u64 size)
-{
-       return lmb_overlaps_region(&lmb.reserved, base, size);
-}
-
-/*
- * Given a <base, len>, find which memory regions belong to this range.
- * Adjust the request and return a contiguous chunk.
- */
-int lmb_find(struct lmb_property *res)
-{
-       int i;
-       u64 rstart, rend;
-
-       rstart = res->base;
-       rend = rstart + res->size - 1;
-
-       for (i = 0; i < lmb.memory.cnt; i++) {
-               u64 start = lmb.memory.region[i].base;
-               u64 end = start + lmb.memory.region[i].size - 1;
-
-               if (start > rend)
-                       return -1;
-
-               if ((end >= rstart) && (start < rend)) {
-                       /* adjust the request */
-                       if (rstart < start)
-                               rstart = start;
-                       if (rend > end)
-                               rend = end;
-                       res->base = rstart;
-                       res->size = rend - rstart + 1;
-                       return 0;
-               }
-       }
-       return -1;
-}
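
Nothing in the 541 deleted lines is actually lost: the same code returns, renamed from lmb_* to memblock_*, as the new file mm/memblock.c further down in this diff. The deletion here and the addition below are the two halves of moving the allocator from lib/ to mm/ under its new name.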
index 527136b2238496c890583a2d44838bf37f3878e3..f4e516e9c37cc4c62f93faf92131a632098d2ca5 100644 (file)
@@ -128,6 +128,9 @@ config SPARSEMEM_VMEMMAP
         pfn_to_page and page_to_pfn operations.  This is the most
         efficient option when sufficient kernel resources are available.
 
+config HAVE_MEMBLOCK
+       boolean
+
 # eventually, we can have this option just 'select SPARSEMEM'
 config MEMORY_HOTPLUG
        bool "Allow for memory hot-add"
index 8982504bd03bb783ab8fe2d0dce2c08010f3e66d..34b2546a9e37e9b2f08891cb7a3242b2fe89c86a 100644 (file)
@@ -15,6 +15,8 @@ obj-y                 := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
                           $(mmu-y)
 obj-y += init-mm.o
 
+obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
+
 obj-$(CONFIG_BOUNCE)   += bounce.o
 obj-$(CONFIG_SWAP)     += page_io.o swap_state.o swapfile.o thrash.o
 obj-$(CONFIG_HAS_DMA)  += dmapool.o
index 58c66cc5056a7073aabf880116548a9572c2894b..142c84a54993a75ca2fbd5346e3eb5b16552f390 100644 (file)
@@ -833,15 +833,24 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
 {
+       void *ptr;
+
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
 #ifdef CONFIG_NO_BOOTMEM
-       return __alloc_memory_core_early(pgdat->node_id, size, align,
+       ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                                        goal, -1ULL);
+       if (ptr)
+               return ptr;
+
+       ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
                                         goal, -1ULL);
 #else
-       return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+       ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
 #endif
+
+       return ptr;
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -977,14 +986,21 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
 {
+       void *ptr;
+
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
 #ifdef CONFIG_NO_BOOTMEM
-       return __alloc_memory_core_early(pgdat->node_id, size, align,
+       ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                               goal, ARCH_LOW_ADDRESS_LIMIT);
+       if (ptr)
+               return ptr;
+       ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
                                goal, ARCH_LOW_ADDRESS_LIMIT);
 #else
-       return ___alloc_bootmem_node(pgdat->bdata, size, align,
+       ptr = ___alloc_bootmem_node(pgdat->bdata, size, align,
                                goal, ARCH_LOW_ADDRESS_LIMIT);
 #endif
+       return ptr;
 }
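
Both bootmem wrappers gain the same CONFIG_NO_BOOTMEM fallback: if the node-local __alloc_memory_core_early() attempt returns NULL, the allocation is retried with MAX_NUMNODES, meaning any node, instead of failing outright. Node affinity thereby becomes a preference rather than a hard requirement for these early allocations.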
diff --git a/mm/memblock.c b/mm/memblock.c
new file mode 100644 (file)
index 0000000..3024eb3
--- /dev/null
+++ b/mm/memblock.c
@@ -0,0 +1,541 @@
+/*
+ * Procedures for maintaining information about logical memory blocks.
+ *
+ * Peter Bergner, IBM Corp.    June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/memblock.h>
+
+#define MEMBLOCK_ALLOC_ANYWHERE        0
+
+struct memblock memblock;
+
+static int memblock_debug;
+
+static int __init early_memblock(char *p)
+{
+       if (p && strstr(p, "debug"))
+               memblock_debug = 1;
+       return 0;
+}
+early_param("memblock", early_memblock);
+
+static void memblock_dump(struct memblock_region *region, char *name)
+{
+       unsigned long long base, size;
+       int i;
+
+       pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);
+
+       for (i = 0; i < region->cnt; i++) {
+               base = region->region[i].base;
+               size = region->region[i].size;
+
+               pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
+                   name, i, base, base + size - 1, size);
+       }
+}
+
+void memblock_dump_all(void)
+{
+       if (!memblock_debug)
+               return;
+
+       pr_info("MEMBLOCK configuration:\n");
+       pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)memblock.rmo_size);
+       pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size);
+
+       memblock_dump(&memblock.memory, "memory");
+       memblock_dump(&memblock.reserved, "reserved");
+}
+
+static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2,
+                                       u64 size2)
+{
+       return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
+}
+
+static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
+{
+       if (base2 == base1 + size1)
+               return 1;
+       else if (base1 == base2 + size2)
+               return -1;
+
+       return 0;
+}
+
+static long memblock_regions_adjacent(struct memblock_region *rgn,
+               unsigned long r1, unsigned long r2)
+{
+       u64 base1 = rgn->region[r1].base;
+       u64 size1 = rgn->region[r1].size;
+       u64 base2 = rgn->region[r2].base;
+       u64 size2 = rgn->region[r2].size;
+
+       return memblock_addrs_adjacent(base1, size1, base2, size2);
+}
+
+static void memblock_remove_region(struct memblock_region *rgn, unsigned long r)
+{
+       unsigned long i;
+
+       for (i = r; i < rgn->cnt - 1; i++) {
+               rgn->region[i].base = rgn->region[i + 1].base;
+               rgn->region[i].size = rgn->region[i + 1].size;
+       }
+       rgn->cnt--;
+}
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void memblock_coalesce_regions(struct memblock_region *rgn,
+               unsigned long r1, unsigned long r2)
+{
+       rgn->region[r1].size += rgn->region[r2].size;
+       memblock_remove_region(rgn, r2);
+}
+
+void __init memblock_init(void)
+{
+       /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
+        * This simplifies the memblock_add() code below...
+        */
+       memblock.memory.region[0].base = 0;
+       memblock.memory.region[0].size = 0;
+       memblock.memory.cnt = 1;
+
+       /* Ditto. */
+       memblock.reserved.region[0].base = 0;
+       memblock.reserved.region[0].size = 0;
+       memblock.reserved.cnt = 1;
+}
+
+void __init memblock_analyze(void)
+{
+       int i;
+
+       memblock.memory.size = 0;
+
+       for (i = 0; i < memblock.memory.cnt; i++)
+               memblock.memory.size += memblock.memory.region[i].size;
+}
+
+static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size)
+{
+       unsigned long coalesced = 0;
+       long adjacent, i;
+
+       if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
+               rgn->region[0].base = base;
+               rgn->region[0].size = size;
+               return 0;
+       }
+
+       /* First try and coalesce this MEMBLOCK with another. */
+       for (i = 0; i < rgn->cnt; i++) {
+               u64 rgnbase = rgn->region[i].base;
+               u64 rgnsize = rgn->region[i].size;
+
+               if ((rgnbase == base) && (rgnsize == size))
+                       /* Already have this region, so we're done */
+                       return 0;
+
+               adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
+               if (adjacent > 0) {
+                       rgn->region[i].base -= size;
+                       rgn->region[i].size += size;
+                       coalesced++;
+                       break;
+               } else if (adjacent < 0) {
+                       rgn->region[i].size += size;
+                       coalesced++;
+                       break;
+               }
+       }
+
+       if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) {
+               memblock_coalesce_regions(rgn, i, i+1);
+               coalesced++;
+       }
+
+       if (coalesced)
+               return coalesced;
+       if (rgn->cnt >= MAX_MEMBLOCK_REGIONS)
+               return -1;
+
+       /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
+       for (i = rgn->cnt - 1; i >= 0; i--) {
+               if (base < rgn->region[i].base) {
+                       rgn->region[i+1].base = rgn->region[i].base;
+                       rgn->region[i+1].size = rgn->region[i].size;
+               } else {
+                       rgn->region[i+1].base = base;
+                       rgn->region[i+1].size = size;
+                       break;
+               }
+       }
+
+       if (base < rgn->region[0].base) {
+               rgn->region[0].base = base;
+               rgn->region[0].size = size;
+       }
+       rgn->cnt++;
+
+       return 0;
+}
+
+long memblock_add(u64 base, u64 size)
+{
+       struct memblock_region *_rgn = &memblock.memory;
+
+       /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
+       if (base == 0)
+               memblock.rmo_size = size;
+
+       return memblock_add_region(_rgn, base, size);
+
+}
+
+static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
+{
+       u64 rgnbegin, rgnend;
+       u64 end = base + size;
+       int i;
+
+       rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+       /* Find the region where (base, size) belongs to */
+       for (i=0; i < rgn->cnt; i++) {
+               rgnbegin = rgn->region[i].base;
+               rgnend = rgnbegin + rgn->region[i].size;
+
+               if ((rgnbegin <= base) && (end <= rgnend))
+                       break;
+       }
+
+       /* Didn't find the region */
+       if (i == rgn->cnt)
+               return -1;
+
+       /* Check to see if we are removing entire region */
+       if ((rgnbegin == base) && (rgnend == end)) {
+               memblock_remove_region(rgn, i);
+               return 0;
+       }
+
+       /* Check to see if region is matching at the front */
+       if (rgnbegin == base) {
+               rgn->region[i].base = end;
+               rgn->region[i].size -= size;
+               return 0;
+       }
+
+       /* Check to see if the region is matching at the end */
+       if (rgnend == end) {
+               rgn->region[i].size -= size;
+               return 0;
+       }
+
+       /*
+        * We need to split the entry - adjust the current one to the
+        * beginning of the hole and add the region after the hole.
+        */
+       rgn->region[i].size = base - rgn->region[i].base;
+       return memblock_add_region(rgn, end, rgnend - end);
+}
+
+long memblock_remove(u64 base, u64 size)
+{
+       return __memblock_remove(&memblock.memory, base, size);
+}
+
+long __init memblock_free(u64 base, u64 size)
+{
+       return __memblock_remove(&memblock.reserved, base, size);
+}
+
+long __init memblock_reserve(u64 base, u64 size)
+{
+       struct memblock_region *_rgn = &memblock.reserved;
+
+       BUG_ON(0 == size);
+
+       return memblock_add_region(_rgn, base, size);
+}
+
+long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size)
+{
+       unsigned long i;
+
+       for (i = 0; i < rgn->cnt; i++) {
+               u64 rgnbase = rgn->region[i].base;
+               u64 rgnsize = rgn->region[i].size;
+               if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
+                       break;
+       }
+
+       return (i < rgn->cnt) ? i : -1;
+}
+
+static u64 memblock_align_down(u64 addr, u64 size)
+{
+       return addr & ~(size - 1);
+}
+
+static u64 memblock_align_up(u64 addr, u64 size)
+{
+       return (addr + (size - 1)) & ~(size - 1);
+}
+
+static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
+                                          u64 size, u64 align)
+{
+       u64 base, res_base;
+       long j;
+
+       base = memblock_align_down((end - size), align);
+       while (start <= base) {
+               j = memblock_overlaps_region(&memblock.reserved, base, size);
+               if (j < 0) {
+                       /* this area isn't reserved, take it */
+                       if (memblock_add_region(&memblock.reserved, base, size) < 0)
+                               base = ~(u64)0;
+                       return base;
+               }
+               res_base = memblock.reserved.region[j].base;
+               if (res_base < size)
+                       break;
+               base = memblock_align_down(res_base - size, align);
+       }
+
+       return ~(u64)0;
+}
+
+static u64 __init memblock_alloc_nid_region(struct memblock_property *mp,
+                                      u64 (*nid_range)(u64, u64, int *),
+                                      u64 size, u64 align, int nid)
+{
+       u64 start, end;
+
+       start = mp->base;
+       end = start + mp->size;
+
+       start = memblock_align_up(start, align);
+       while (start < end) {
+               u64 this_end;
+               int this_nid;
+
+               this_end = nid_range(start, end, &this_nid);
+               if (this_nid == nid) {
+                       u64 ret = memblock_alloc_nid_unreserved(start, this_end,
+                                                          size, align);
+                       if (ret != ~(u64)0)
+                               return ret;
+               }
+               start = this_end;
+       }
+
+       return ~(u64)0;
+}
+
+u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
+                        u64 (*nid_range)(u64 start, u64 end, int *nid))
+{
+       struct memblock_region *mem = &memblock.memory;
+       int i;
+
+       BUG_ON(0 == size);
+
+       size = memblock_align_up(size, align);
+
+       for (i = 0; i < mem->cnt; i++) {
+               u64 ret = memblock_alloc_nid_region(&mem->region[i],
+                                              nid_range,
+                                              size, align, nid);
+               if (ret != ~(u64)0)
+                       return ret;
+       }
+
+       return memblock_alloc(size, align);
+}
+
+u64 __init memblock_alloc(u64 size, u64 align)
+{
+       return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+}
+
+u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+{
+       u64 alloc;
+
+       alloc = __memblock_alloc_base(size, align, max_addr);
+
+       if (alloc == 0)
+               panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+                     (unsigned long long) size, (unsigned long long) max_addr);
+
+       return alloc;
+}
+
+u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+{
+       long i, j;
+       u64 base = 0;
+       u64 res_base;
+
+       BUG_ON(0 == size);
+
+       size = memblock_align_up(size, align);
+
+       /* On some platforms, make sure we allocate lowmem */
+       /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
+       if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
+               max_addr = MEMBLOCK_REAL_LIMIT;
+
+       for (i = memblock.memory.cnt - 1; i >= 0; i--) {
+               u64 memblockbase = memblock.memory.region[i].base;
+               u64 memblocksize = memblock.memory.region[i].size;
+
+               if (memblocksize < size)
+                       continue;
+               if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
+                       base = memblock_align_down(memblockbase + memblocksize - size, align);
+               else if (memblockbase < max_addr) {
+                       base = min(memblockbase + memblocksize, max_addr);
+                       base = memblock_align_down(base - size, align);
+               } else
+                       continue;
+
+               while (base && memblockbase <= base) {
+                       j = memblock_overlaps_region(&memblock.reserved, base, size);
+                       if (j < 0) {
+                               /* this area isn't reserved, take it */
+                               if (memblock_add_region(&memblock.reserved, base, size) < 0)
+                                       return 0;
+                               return base;
+                       }
+                       res_base = memblock.reserved.region[j].base;
+                       if (res_base < size)
+                               break;
+                       base = memblock_align_down(res_base - size, align);
+               }
+       }
+       return 0;
+}
+
+/* You must call memblock_analyze() before this. */
+u64 __init memblock_phys_mem_size(void)
+{
+       return memblock.memory.size;
+}
+
+u64 memblock_end_of_DRAM(void)
+{
+       int idx = memblock.memory.cnt - 1;
+
+       return (memblock.memory.region[idx].base + memblock.memory.region[idx].size);
+}
+
+/* You must call memblock_analyze() after this. */
+void __init memblock_enforce_memory_limit(u64 memory_limit)
+{
+       unsigned long i;
+       u64 limit;
+       struct memblock_property *p;
+
+       if (!memory_limit)
+               return;
+
+       /* Truncate the memblock regions to satisfy the memory limit. */
+       limit = memory_limit;
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               if (limit > memblock.memory.region[i].size) {
+                       limit -= memblock.memory.region[i].size;
+                       continue;
+               }
+
+               memblock.memory.region[i].size = limit;
+               memblock.memory.cnt = i + 1;
+               break;
+       }
+
+       if (memblock.memory.region[0].size < memblock.rmo_size)
+               memblock.rmo_size = memblock.memory.region[0].size;
+
+       memory_limit = memblock_end_of_DRAM();
+
+       /* And truncate any reserves above the limit also. */
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               p = &memblock.reserved.region[i];
+
+               if (p->base > memory_limit)
+                       p->size = 0;
+               else if ((p->base + p->size) > memory_limit)
+                       p->size = memory_limit - p->base;
+
+               if (p->size == 0) {
+                       memblock_remove_region(&memblock.reserved, i);
+                       i--;
+               }
+       }
+}
+
+int __init memblock_is_reserved(u64 addr)
+{
+       int i;
+
+       for (i = 0; i < memblock.reserved.cnt; i++) {
+               u64 upper = memblock.reserved.region[i].base +
+                       memblock.reserved.region[i].size - 1;
+               if ((addr >= memblock.reserved.region[i].base) && (addr <= upper))
+                       return 1;
+       }
+       return 0;
+}
+
+int memblock_is_region_reserved(u64 base, u64 size)
+{
+       return memblock_overlaps_region(&memblock.reserved, base, size);
+}
+
+/*
+ * Given a <base, len>, find which memory regions belong to this range.
+ * Adjust the request and return a contiguous chunk.
+ */
+int memblock_find(struct memblock_property *res)
+{
+       int i;
+       u64 rstart, rend;
+
+       rstart = res->base;
+       rend = rstart + res->size - 1;
+
+       for (i = 0; i < memblock.memory.cnt; i++) {
+               u64 start = memblock.memory.region[i].base;
+               u64 end = start + memblock.memory.region[i].size - 1;
+
+               if (start > rend)
+                       return -1;
+
+               if ((end >= rstart) && (start < rend)) {
+                       /* adjust the request */
+                       if (rstart < start)
+                               rstart = start;
+                       if (rend > end)
+                               rend = end;
+                       res->base = rstart;
+                       res->size = rend - rstart + 1;
+                       return 0;
+               }
+       }
+       return -1;
+}
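
Taken together, mm/memblock.c exposes a small physical-memory allocator for early boot. A minimal usage sketch for an architecture's setup code; the call order follows the comments in the file itself, and dram_size, KERNEL_START and KERNEL_SIZE are placeholders:

    /* Early boot, before the page allocator is up. */
    memblock_init();                                /* dummy zero-size regions */
    memblock_add(0, dram_size);                     /* describe available RAM */
    memblock_reserve(KERNEL_START, KERNEL_SIZE);    /* protect the kernel image */
    memblock_analyze();                             /* recompute memblock.memory.size */

    /* Allocations carve aligned ranges out of .memory that avoid .reserved;
     * align must be a power of two (memblock_align_down() is a mask). */
    u64 pgtable = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

Note the failure modes: memblock_alloc_base() panics when it cannot satisfy the request, while __memblock_alloc_base() returns 0, so callers with a fallback should use the double-underscore variant.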
index 431214b941acec39f68e171e468a59f74b9b749e..9bd339eb04c6c84691232bc7e499947fe9d13942 100644 (file)
@@ -3634,6 +3634,9 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
        int i;
        void *ptr;
 
+       if (limit > get_max_mapped())
+               limit = get_max_mapped();
+
        /* need to go over early_node_map to find out good range for node */
        for_each_active_range_index_in_nid(i, nid) {
                u64 addr;
@@ -3659,6 +3662,11 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                ptr = phys_to_virt(addr);
                memset(ptr, 0, size);
                reserve_early_without_check(addr, addr + size, "BOOTMEM");
+               /*
+                * The min_count is set to 0 so that bootmem allocated blocks
+                * are never reported as leaks.
+                */
+               kmemleak_alloc(ptr, size, 0, 0);
                return ptr;
        }
 
index 6c0081441a326a174f83ebd66485bd9cb961f88d..5bffada7cde17a383e5ba510c2cca4be23463042 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/vmalloc.h>
 #include <linux/cgroup.h>
 #include <linux/swapops.h>
+#include <linux/kmemleak.h>
 
 static void __meminit
 __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -126,6 +127,12 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
                        if (!base)
                                base = vmalloc(table_size);
                }
+               /*
+                * The value stored in section->page_cgroup is (base - pfn)
+                * and it does not point to the memory block allocated above,
+                * causing kmemleak false positives.
+                */
+               kmemleak_not_leak(base);
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but
index 9c7e57cc63a34f7231b77a7d8b395d3157a34ba6..b94fe1b3da435f34f567e8a23358ec18ccd452ad 100644 (file)
@@ -213,8 +213,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
-               unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
+               unsigned long max_pass;
 
+               max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
                delta = (4 * scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
@@ -242,8 +243,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        int shrink_ret;
                        int nr_before;
 
-                       nr_before = (*shrinker->shrink)(0, gfp_mask);
-                       shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
+                       nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
+                       shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
+                                                               gfp_mask);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
@@ -296,7 +298,7 @@ static int may_write_to_queue(struct backing_dev_info *bdi)
 static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
 {
-       lock_page(page);
+       lock_page_nosync(page);
        if (page_mapping(page) == mapping)
                mapping_set_error(mapping, error);
        unlock_page(page);
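
All three shrink_slab() call sites now pass the struct shrinker back into the callback, which lets one shrink function serve several registered shrinkers and recover its private state via container_of(). A sketch against the new signature; the my_cache names and eviction helper are hypothetical:

    struct my_cache {
            int nr_objects;
            struct shrinker shrinker;       /* registered with register_shrinker() */
    };

    static int my_cache_shrink(struct shrinker *shrink, int nr_to_scan,
                               gfp_t gfp_mask)
    {
            struct my_cache *c = container_of(shrink, struct my_cache, shrinker);

            if (nr_to_scan == 0)            /* probe pass: report the pool size */
                    return c->nr_objects;
            return my_cache_evict(c, nr_to_scan);   /* hypothetical evictor */
    }

The nr_to_scan == 0 probe corresponds to the (*shrinker->shrink)(shrinker, 0, gfp_mask) calls above, which shrink_slab() uses to size its scan.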
index b10e3cdb08f87358ca64d0db8cf83c27f5ad624a..800b6b9fbbaefe15c90406e7631acd4e25b172b3 100644 (file)
@@ -358,6 +358,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
                acl->sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_connect(acl);
+       } else {
+               if (acl->sec_level < sec_level)
+                       acl->sec_level = sec_level;
+               if (acl->auth_type < auth_type)
+                       acl->auth_type = auth_type;
        }
 
        if (type == ACL_LINK)
index 6c57fc71c7e2d0df7c75daf1054901cf7ed8eee6..786b5de0bac42819ae3e207c942840a69f2c13da 100644 (file)
@@ -1049,6 +1049,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
        if (conn) {
                if (!ev->status)
                        conn->link_mode |= HCI_LM_AUTH;
+               else
+                       conn->sec_level = BT_SECURITY_LOW;
 
                clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
 
index 1b682a5aa0616911c2861b5d836c015230922faf..cf3c4073a8a655d6e9d8936cb61d30254224962f 100644 (file)
@@ -401,6 +401,11 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
        l2cap_send_sframe(pi, control);
 }
 
+static inline int __l2cap_no_conn_pending(struct sock *sk)
+{
+       return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+}
+
 static void l2cap_do_start(struct sock *sk)
 {
        struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -409,12 +414,13 @@ static void l2cap_do_start(struct sock *sk)
                if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
                        return;
 
-               if (l2cap_check_security(sk)) {
+               if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
                        struct l2cap_conn_req req;
                        req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
                        req.psm  = l2cap_pi(sk)->psm;
 
                        l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                       l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
                        l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                                        L2CAP_CONN_REQ, sizeof(req), &req);
@@ -464,12 +470,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                }
 
                if (sk->sk_state == BT_CONNECT) {
-                       if (l2cap_check_security(sk)) {
+                       if (l2cap_check_security(sk) &&
+                                       __l2cap_no_conn_pending(sk)) {
                                struct l2cap_conn_req req;
                                req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
                                req.psm  = l2cap_pi(sk)->psm;
 
                                l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                               l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
                                l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                                        L2CAP_CONN_REQ, sizeof(req), &req);
@@ -2912,7 +2920,6 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
                l2cap_pi(sk)->ident = 0;
                l2cap_pi(sk)->dcid = dcid;
                l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
                l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
 
                l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
@@ -4404,6 +4411,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                                req.psm  = l2cap_pi(sk)->psm;
 
                                l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                               l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
                                l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                                        L2CAP_CONN_REQ, sizeof(req), &req);
index eedf2c94820e26150d8823c2aa1652626c5f281d..753fc4221f3c3587e41dfd816fd6e2771578a243 100644 (file)
@@ -217,14 +217,6 @@ static bool br_devices_support_netpoll(struct net_bridge *br)
        return count != 0 && ret;
 }
 
-static void br_poll_controller(struct net_device *br_dev)
-{
-       struct netpoll *np = br_dev->npinfo->netpoll;
-
-       if (np->real_dev != br_dev)
-               netpoll_poll_dev(np->real_dev);
-}
-
 void br_netpoll_cleanup(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -295,7 +287,6 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_do_ioctl            = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_cleanup     = br_netpoll_cleanup,
-       .ndo_poll_controller     = br_poll_controller,
 #endif
 };
 
index a4e72a89e4ffc4eae7a2e7e09963bd0fb58e26a4..595da45f908854d0c9eef2e54ba94280de5afcfb 100644 (file)
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
                        kfree_skb(skb);
                else {
                        skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-                       if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
-                               netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
-                               skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
-                       } else
-#endif
-                               dev_queue_xmit(skb);
+                       dev_queue_xmit(skb);
                }
        }
 
@@ -73,23 +66,9 @@ int br_forward_finish(struct sk_buff *skb)
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       struct net_bridge *br = to->br;
-       if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
-               struct netpoll *np;
-               to->dev->npinfo = skb->dev->npinfo;
-               np = skb->dev->npinfo->netpoll;
-               np->real_dev = np->dev = to->dev;
-               to->dev->priv_flags |= IFF_IN_NETPOLL;
-       }
-#endif
        skb->dev = to->dev;
        NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
                br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       if (skb->dev->npinfo)
-               skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
 }
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
index 723a34710ad401ff24afd5b1be1af86505f7da33..0ea10f849be862fff3219bc69ed4c5db0cbd9253 100644 (file)
@@ -1911,8 +1911,16 @@ static int dev_gso_segment(struct sk_buff *skb)
  */
 static inline void skb_orphan_try(struct sk_buff *skb)
 {
-       if (!skb_tx(skb)->flags)
+       struct sock *sk = skb->sk;
+
+       if (sk && !skb_tx(skb)->flags) {
+               /* skb_tx_hash() wont be able to get sk.
+                * We copy sk_hash into skb->rxhash
+                */
+               if (!skb->rxhash)
+                       skb->rxhash = sk->sk_hash;
                skb_orphan(skb);
+       }
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1998,8 +2006,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
-               hash = (__force u16) skb->protocol;
-
+               hash = (__force u16) skb->protocol ^ skb->rxhash;
        hash = jhash_1word(hash, hashrnd);
 
        return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
@@ -2022,12 +2029,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                        struct sk_buff *skb)
 {
-       u16 queue_index;
+       int queue_index;
        struct sock *sk = skb->sk;
 
-       if (sk_tx_queue_recorded(sk)) {
-               queue_index = sk_tx_queue_get(sk);
-       } else {
+       queue_index = sk_tx_queue_get(sk);
+       if (queue_index < 0) {
                const struct net_device_ops *ops = dev->netdev_ops;
 
                if (ops->ndo_select_queue) {
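
The two dev.c hunks work as a pair: skb_orphan_try() stashes sk->sk_hash in skb->rxhash before the skb loses its socket, and skb_tx_hash() folds skb->rxhash into its fallback, so early-orphaned packets still spread across transmit queues per flow instead of all hashing from the bare protocol number. The final queue mapping avoids a modulo: for a 32-bit hash h and n queues, ((u64)h * n) >> 32 lands uniformly in [0, n); for example, n = 8 and h = 0x80000000 gives (8 * 2^31) >> 32 = 4.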
index 6ba1c0eece039f99f19c0451bf5a52b3b9090a71..a4e0a7482c2bc878bdd6e0986ac45e0dc6659b3d 100644 (file)
@@ -949,7 +949,10 @@ static void neigh_update_hhs(struct neighbour *neigh)
 {
        struct hh_cache *hh;
        void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
-               = neigh->dev->header_ops->cache_update;
+               = NULL;
+
+       if (neigh->dev->header_ops)
+               update = neigh->dev->header_ops->cache_update;
 
        if (update) {
                for (hh = neigh->hh; hh; hh = hh->hh_next) {
index c51b55400dc56818a2f4e0cca68cb74be38048a3..11201784d29a32d6fdddd972a8348d3202de0ec9 100644 (file)
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
        bool "Distributed Switch Architecture support"
        default n
-       depends on EXPERIMENTAL && !S390
+       depends on EXPERIMENTAL && NET_ETHERNET && !S390
        select PHYLIB
        ---help---
          This allows you to use hardware switch chips that use
index 757f25eb9b4b2404ebebc6c4422b4ad1693ea227..7f6273506eea46522a17ff85b9a451cb18fdc7d3 100644 (file)
@@ -442,8 +442,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
        int err;
 
        err = ipmr_fib_lookup(net, &fl, &mrt);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return err;
+       }
 
        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
@@ -1728,8 +1730,10 @@ int ip_mr_input(struct sk_buff *skb)
                goto dont_forward;
 
        err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return err;
+       }
 
        if (!local) {
                    if (IPCB(skb)->opt.router_alert) {
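
Both ipmr fixes close the same leak: reg_vif_xmit() and ip_mr_input() own the skb once invoked, so an early return on a failed ipmr_fib_lookup() must free it. The pattern, as applied in both hunks:

    err = ipmr_fib_lookup(net, &fl, &mrt);
    if (err < 0) {
            kfree_skb(skb);         /* we own the skb; free it on every early exit */
            return err;
    }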
index 6596b4feeddc7879fc020606f53dabdc8608f3e8..65afeaec15b7ebb85610650353e881aacb3e90aa 100644 (file)
@@ -608,6 +608,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
        ssize_t spliced;
        int ret;
 
+       sock_rps_record_flow(sk);
        /*
         * We can't seek on a socket input
         */
index b4ed957f201a6f3f2ebf8d5564edec567b6435a6..7ed9dc1042d1930a7eb2107def30642cb367fa92 100644 (file)
@@ -2208,6 +2208,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
        int mib_idx;
        int fwd_rexmitting = 0;
 
+       if (!tp->packets_out)
+               return;
+
        if (!tp->lost_out)
                tp->retransmit_high = tp->snd_una;
 
index 2794b6002836f446c4c6fb9835316d9ee61bba51..d6e9599d0705a5b79749a0566cbd97dfaa9993c0 100644 (file)
@@ -347,11 +347,12 @@ static const struct xfrm_type mip6_destopt_type =
 
 static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
 {
+       struct ipv6hdr *iph = ipv6_hdr(skb);
        struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
        int err = rt2->rt_hdr.nexthdr;
 
        spin_lock(&x->lock);
-       if (!ipv6_addr_equal(&rt2->addr, (struct in6_addr *)x->coaddr) &&
+       if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
            !ipv6_addr_any((struct in6_addr *)x->coaddr))
                err = -ENOENT;
        spin_unlock(&x->lock);
index 94d72e85a475ae094d14f8df5f4f54a936f0db15..b2a3ae6cad78e28324e23b857dc0a5773f569786 100644 (file)
@@ -698,6 +698,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
                newsk = NULL;
                goto out;
        }
+       kfree_skb(oskb);
 
        sock_hold(sk);
        pep_sk(newsk)->listener = sk;
index 570949417f388735e93bf887294326a8f24aa534..724553e8ed7bc9d8ecd668c71ab9373936a3d2d3 100644 (file)
@@ -205,7 +205,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
        {
                struct icmphdr *icmph;
 
-               if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+               if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
                        goto drop;
 
                icmph = (void *)(skb_network_header(skb) + ihl);
@@ -215,6 +215,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
                    (icmph->type != ICMP_PARAMETERPROB))
                        break;
 
+               if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+                       goto drop;
+
                iph = (void *)(icmph + 1);
                if (egress)
                        addr = iph->daddr;
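
The act_nat reordering makes header validation incremental: pull only the ICMP header first, let uninteresting icmph->type values break out, and demand the embedded IP header only for the types that carry one. Previously a short ICMP packet of an uninteresting type was dropped even though it would never have been inspected further. Condensed control flow after the change, stitched from the hunks above:

    if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))      /* outer ICMP header only */
            goto drop;
    icmph = (void *)(skb_network_header(skb) + ihl);
    if ((icmph->type != ICMP_DEST_UNREACH) &&
        (icmph->type != ICMP_TIME_EXCEEDED) &&
        (icmph->type != ICMP_PARAMETERPROB))
            break;                                      /* nothing more to verify */
    if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
            goto drop;                                  /* now require the inner header */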
index af1c173be4ad88906d55d7efa24fb47fcd0cf19d..a7ec5a8a2380e6277fa4a3755e01fb0aa58076fb 100644 (file)
@@ -1594,8 +1594,8 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
        /* Try to instantiate a bundle */
        err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
-       if (err < 0) {
-               if (err != -EAGAIN)
+       if (err <= 0) {
+               if (err != 0 && err != -EAGAIN)
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
                return ERR_PTR(err);
        }
@@ -1678,6 +1678,13 @@ xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
                        goto make_dummy_bundle;
                dst_hold(&xdst->u.dst);
                return oldflo;
+       } else if (new_xdst == NULL) {
+               num_xfrms = 0;
+               if (oldflo == NULL)
+                       goto make_dummy_bundle;
+               xdst->num_xfrms = 0;
+               dst_hold(&xdst->u.dst);
+               return oldflo;
        }
 
        /* Kill the previous bundle */
@@ -1760,6 +1767,10 @@ restart:
                                xfrm_pols_put(pols, num_pols);
                                err = PTR_ERR(xdst);
                                goto dropdst;
+                       } else if (xdst == NULL) {
+                               num_xfrms = 0;
+                               drop_pols = num_pols;
+                               goto no_transform;
                        }
 
                        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
index 3592057829648a8d3f8ac909f93a10a729640b68..fd7407c7205c7be809ce4c3b048e68ab9b7b9487 100644 (file)
@@ -107,7 +107,7 @@ static int perf_session__add_hist_entry(struct perf_session *self,
                goto out_free_syms;
        err = 0;
        if (symbol_conf.use_callchain) {
-               err = append_chain(he->callchain, data->callchain, syms);
+               err = append_chain(he->callchain, data->callchain, syms, data->period);
                if (err)
                        goto out_free_syms;
        }
index 49ece7921914a3fdc20ea02d56af193ab61a73f9..97d76562a1a093c8a4bfc6f4bca652fa410779e4 100755 (executable)
@@ -5,17 +5,13 @@ if [ $# -eq 1 ]  ; then
 fi
 
 GVF=${OUTPUT}PERF-VERSION-FILE
-DEF_VER=v0.0.2.PERF
 
 LF='
 '
 
-# First see if there is a version file (included in release tarballs),
-# then try git-describe, then default.
-if test -f version
-then
-       VN=$(cat version) || VN="$DEF_VER"
-elif test -d .git -o -f .git &&
+# First check if there is a .git to get the version from git describe
+# otherwise try to get the version from the kernel makefile
+if test -d ../../.git -o -f ../../.git &&
        VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
        case "$VN" in
        *$LF*) (exit 1) ;;
@@ -27,7 +23,12 @@ elif test -d .git -o -f .git &&
 then
        VN=$(echo "$VN" | sed -e 's/-/./g');
 else
-       VN="$DEF_VER"
+       eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '`
+       eval `grep '^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '`
+       eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '`
+       eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '`
+
+       VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
 fi
 
 VN=$(expr "$VN" : v*'\(.*\)')
index 62b69ad4aa735bc0ec146277661e30f9c0e3d4e9..52c777e451ed8ebc049f39edfb651bfe848d66cc 100644 (file)
@@ -230,7 +230,7 @@ fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
 
 static void
 add_child(struct callchain_node *parent, struct resolved_chain *chain,
-         int start)
+         int start, u64 period)
 {
        struct callchain_node *new;
 
@@ -238,7 +238,7 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
        fill_node(new, chain, start);
 
        new->children_hit = 0;
-       new->hit = 1;
+       new->hit = period;
 }
 
 /*
@@ -248,7 +248,8 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
  */
 static void
 split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
-               struct callchain_list *to_split, int idx_parents, int idx_local)
+               struct callchain_list *to_split, int idx_parents, int idx_local,
+               u64 period)
 {
        struct callchain_node *new;
        struct list_head *old_tail;
@@ -275,41 +276,41 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
        /* create a new child for the new branch if any */
        if (idx_total < chain->nr) {
                parent->hit = 0;
-               add_child(parent, chain, idx_total);
-               parent->children_hit++;
+               add_child(parent, chain, idx_total, period);
+               parent->children_hit += period;
        } else {
-               parent->hit = 1;
+               parent->hit = period;
        }
 }
 
 static int
 __append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start);
+              unsigned int start, u64 period);
 
 static void
 __append_chain_children(struct callchain_node *root,
                        struct resolved_chain *chain,
-                       unsigned int start)
+                       unsigned int start, u64 period)
 {
        struct callchain_node *rnode;
 
        /* lookup in childrens */
        chain_for_each_child(rnode, root) {
-               unsigned int ret = __append_chain(rnode, chain, start);
+               unsigned int ret = __append_chain(rnode, chain, start, period);
 
                if (!ret)
                        goto inc_children_hit;
        }
        /* nothing in children, add to the current node */
-       add_child(root, chain, start);
+       add_child(root, chain, start, period);
 
 inc_children_hit:
-       root->children_hit++;
+       root->children_hit += period;
 }
 
 static int
 __append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start)
+              unsigned int start, u64 period)
 {
        struct callchain_list *cnode;
        unsigned int i = start;
@@ -345,18 +346,18 @@ __append_chain(struct callchain_node *root, struct resolved_chain *chain,
 
        /* we match only a part of the node. Split it and add the new chain */
        if (i - start < root->val_nr) {
-               split_add_child(root, chain, cnode, start, i - start);
+               split_add_child(root, chain, cnode, start, i - start, period);
                return 0;
        }
 
        /* we match 100% of the path, increment the hit */
        if (i - start == root->val_nr && i == chain->nr) {
-               root->hit++;
+               root->hit += period;
                return 0;
        }
 
        /* We match the node and still have a part remaining */
-       __append_chain_children(root, chain, i);
+       __append_chain_children(root, chain, i, period);
 
        return 0;
 }
@@ -380,7 +381,7 @@ static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
 
 
 int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms)
+                struct map_symbol *syms, u64 period)
 {
        struct resolved_chain *filtered;
 
@@ -397,7 +398,7 @@ int append_chain(struct callchain_node *root, struct ip_callchain *chain,
        if (!filtered->nr)
                goto end;
 
-       __append_chain_children(root, filtered, 0);
+       __append_chain_children(root, filtered, 0, period);
 end:
        free(filtered);
 
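
All of the callchain.c changes above are one mechanical substitution: every place a counter was bumped by 1 per appended chain now adds the sample's period instead. A toy model of the effect, using plain structs rather than perf's, with two samples over different paths and different periods:

#include <stdio.h>

typedef unsigned long long u64;

struct node {
        u64 hit;                /* weight of chains ending at this node */
        u64 children_hit;       /* weight accumulated in the subtree    */
};

static void append(struct node *parent, struct node *leaf, u64 period)
{
        leaf->hit += period;            /* was: leaf->hit++            */
        parent->children_hit += period; /* was: parent->children_hit++ */
}

int main(void)
{
        struct node root = {0, 0}, hot = {0, 0}, cold = {0, 0};

        append(&root, &hot, 9000);      /* one sample, period 9000 */
        append(&root, &cold, 1000);     /* one sample, period 1000 */

        /* one sample each, but the hot path now weighs 9x the cold one */
        printf("hot=%llu cold=%llu root.children_hit=%llu\n",
               hot.hit, cold.hit, root.children_hit);
        return 0;
}
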
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 1ca73e4a2723997099973963f8477bdbbafcc909..f2e9ee164bd8ce7bcf2bb7c7af5901e6c78ebcc3 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -49,6 +49,9 @@ static inline void callchain_init(struct callchain_node *node)
        INIT_LIST_HEAD(&node->brothers);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->val);
+
+       node->parent = NULL;
+       node->hit = 0;
 }
 
 static inline u64 cumul_hits(struct callchain_node *node)
@@ -58,7 +61,7 @@ static inline u64 cumul_hits(struct callchain_node *node)
 
 int register_callchain_param(struct callchain_param *param);
 int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms);
+                struct map_symbol *syms, u64 period);
 
 bool ip_callchain__valid(struct ip_callchain *chain, event_t *event);
 #endif /* __PERF_CALLCHAIN_H */
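
On the header side, callchain_init() now also clears node->hit and node->parent, so freshly allocated nodes start from a well-defined zero weight, and cumul_hits() consequently reports period-weighted totals for whole subtrees. A minimal sketch of that relationship (the struct is a stand-in for perf's, and the cumul_hits() body shown here, own weight plus subtree weight, is assumed from the surrounding code rather than quoted from it):

#include <stdio.h>

typedef unsigned long long u64;

struct callchain_node_model {
        u64 hit;
        u64 children_hit;
};

/* assumed shape of cumul_hits(): this node's weight plus its subtree's */
static u64 cumul_hits(const struct callchain_node_model *node)
{
        return node->hit + node->children_hit;
}

int main(void)
{
        /* 1000 units ended exactly here, 10000 more ended deeper down */
        struct callchain_node_model n = { .hit = 1000, .children_hit = 10000 };

        printf("cumulative weight: %llu\n", cumul_hits(&n));
        return 0;
}
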